/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <linux/mem_encrypt.h>

#include <asm/page_types.h>

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1	9	/* available for programmer */
#define _PAGE_BIT_SOFTW2	10	/* " */
#define _PAGE_BIT_SOFTW3	11	/* " */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4	57	/* available for programmer */
#define _PAGE_BIT_SOFTW5	58	/* available for programmer */
#define _PAGE_BIT_PKEY_BIT0	59	/* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1	60	/* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2	61	/* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3	62	/* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
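
/*
 * Worked example (illustrative only): a typical kernel page combines
 * Present (bit 0), RW (bit 1), Accessed (bit 5), Dirty (bit 6) and
 * Global (bit 8), i.e. 0x163; with NX (bit 63) on top, on 64-bit this
 * yields 0x8000000000000163, the expanded value of __PAGE_KERNEL below.
 */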

#define _PAGE_BIT_SPECIAL	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_UFFD_WP	_PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP	_PAGE_BIT_SOFTW4

#ifdef CONFIG_X86_64
#define _PAGE_BIT_SAVED_DIRTY	_PAGE_BIT_SOFTW5 /* Saved Dirty bit */
#else
/* Shared with _PAGE_BIT_UFFD_WP, which is not supported on 32-bit */
#define _PAGE_BIT_SAVED_DIRTY	_PAGE_BIT_SOFTW2 /* Saved Dirty bit */
#endif

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_SOFTW3	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3	(_AT(pteval_t, 0))
#endif

#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
			 _PAGE_PKEY_BIT1 | \
			 _PAGE_PKEY_BIT2 | \
			 _PAGE_PKEY_BIT3)

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
#else
#define _PAGE_KNL_ERRATUM_MASK 0
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif

/*
 * Tracking the soft dirty bit when a page goes to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86 bits 1-4 are *not* involved
 * in swap entry computation, but bit 7 is used for thp migration,
 * so we borrow bit 1 for soft dirty tracking.
 *
 * Please note that this bit must be treated as a swap dirty page
 * mark if and only if the PTE/PMD has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif
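
/*
 * A minimal sketch of how this alias is consumed (the real helpers
 * live in pgtable.h; shown here only as an illustration):
 *
 *	static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
 *	{
 *		return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
 *	}
 *
 * Because _PAGE_SWP_SOFT_DIRTY aliases _PAGE_RW (bit 1), it is only
 * meaningful while _PAGE_PRESENT is clear, i.e. for swap entries.
 */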

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define _PAGE_UFFD_WP		(_AT(pteval_t, 1) << _PAGE_BIT_UFFD_WP)
#define _PAGE_SWP_UFFD_WP	_PAGE_USER
#else
#define _PAGE_UFFD_WP		(_AT(pteval_t, 0))
#define _PAGE_SWP_UFFD_WP	(_AT(pteval_t, 0))
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP	(_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define _PAGE_SOFTW4	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#define _PAGE_DEVMAP	(_AT(pteval_t, 0))
#define _PAGE_SOFTW4	(_AT(pteval_t, 0))
#endif

/*
 * The hardware requires shadow stack memory to be Write=0,Dirty=1.
 * However, there are valid cases where the kernel might create read-only
 * PTEs that are dirty (e.g., fork(), mprotect(), uffd-wp(), soft-dirty
 * tracking). In this case, the _PAGE_SAVED_DIRTY bit is used instead of
 * the HW-dirty bit, to avoid creating wrong "shadow stack" PTEs. Such
 * PTEs have (Write=0,SavedDirty=1,Dirty=0) set.
 */
#define _PAGE_SAVED_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SAVED_DIRTY)

#define _PAGE_DIRTY_BITS (_PAGE_DIRTY | _PAGE_SAVED_DIRTY)
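
/*
 * Illustrative sketch (the real helpers are pte_mksaveddirty() and
 * friends in pgtable.h): when a dirty PTE must become read-only, the
 * HW dirty bit is moved aside so the result is not Write=0,Dirty=1:
 *
 *	val = (val & ~_PAGE_DIRTY) | _PAGE_SAVED_DIRTY;
 *
 * leaving the PTE as (Write=0,SavedDirty=1,Dirty=0).
 */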

#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

/*
 * Set of bits not changed in pte_modify.  The pte's
 * protection key is treated like _PAGE_RW, for
 * instance, and is *not* included in this mask since
 * pte_modify() does modify it.
 */
#define _COMMON_PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |	\
				 _PAGE_SPECIAL | _PAGE_ACCESSED |	\
				 _PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY |	\
				 _PAGE_DEVMAP | _PAGE_ENC | _PAGE_UFFD_WP)
#define _PAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
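
/*
 * A sketch of how these masks are consumed (pte_modify() itself lives
 * in pgtable.h; this is only the shape of the computation):
 *
 *	val = (pte_val(pte) & _PAGE_CHG_MASK) |
 *	      (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
 *
 * i.e. everything in _PAGE_CHG_MASK survives; everything else,
 * including the protection key bits, comes from the new protection.
 */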

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB       = 0,
	_PAGE_CACHE_MODE_WC       = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC       = 3,
	_PAGE_CACHE_MODE_WT       = 4,
	_PAGE_CACHE_MODE_WP       = 5,

	_PAGE_CACHE_MODE_NUM      = 8
};
#endif
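
/*
 * Illustration (assuming the power-on default PAT layout; the kernel
 * normally reprograms the PAT MSR at boot): the PAT, PCD and PWT bits
 * of an entry form a 3-bit index into the eight PAT slots, so WB is
 * index 0 (all bits clear, as required above) and PCD|PWT selects
 * index 3.
 */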

#define _PAGE_ENC		(_AT(pteval_t, sme_me_mask))

#define _PAGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
#define _PAGE_LARGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)

#define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP		(cachemode2protval(_PAGE_CACHE_MODE_WP))

#define __PP _PAGE_PRESENT
#define __RW _PAGE_RW
#define _USR _PAGE_USER
#define ___A _PAGE_ACCESSED
#define ___D _PAGE_DIRTY
#define ___G _PAGE_GLOBAL
#define __NX _PAGE_NX

#define _ENC _PAGE_ENC
#define __WP _PAGE_CACHE_WP
#define __NC _PAGE_NOCACHE
#define _PSE _PAGE_PSE

#define pgprot_val(x)		((x).pgprot)
#define __pgprot(x)		((pgprot_t) { (x) } )
#define __pg(x)			__pgprot(x)

#define PAGE_NONE	     __pg(   0|   0|   0|___A|   0|   0|   0|___G)
#define PAGE_SHARED	     __pg(__PP|__RW|_USR|___A|__NX|   0|   0|   0)
#define PAGE_SHARED_EXEC     __pg(__PP|__RW|_USR|___A|   0|   0|   0|   0)
#define PAGE_COPY_NOEXEC     __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
#define PAGE_COPY_EXEC	     __pg(__PP|   0|_USR|___A|   0|   0|   0|   0)
#define PAGE_COPY	     __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
#define PAGE_READONLY	     __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
#define PAGE_READONLY_EXEC   __pg(__PP|   0|_USR|___A|   0|   0|   0|   0)

#define __PAGE_KERNEL		 (__PP|__RW|   0|___A|__NX|___D|   0|___G)
#define __PAGE_KERNEL_EXEC	 (__PP|__RW|   0|___A|   0|___D|   0|___G)

/*
 * Page tables need to have Write=1 in order for any lower PTEs to be
 * writable. This includes shadow stack memory (Write=0, Dirty=1).
 */
#define _KERNPG_TABLE_NOENC	 (__PP|__RW|   0|___A|   0|___D|   0|   0)
#define _KERNPG_TABLE		 (__PP|__RW|   0|___A|   0|___D|   0|   0| _ENC)
#define _PAGE_TABLE_NOENC	 (__PP|__RW|_USR|___A|   0|___D|   0|   0)
#define _PAGE_TABLE		 (__PP|__RW|_USR|___A|   0|___D|   0|   0| _ENC)
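
/*
 * Worked example (illustrative only): _KERNPG_TABLE_NOENC expands to
 * Present (0x1) | RW (0x2) | Accessed (0x20) | Dirty (0x40) = 0x63,
 * which matches the hard-coded PDE_IDENT_ATTR value (0x063) used for
 * the 32-bit identity mapping further down.
 */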

#define __PAGE_KERNEL_RO	 (__PP|   0|   0|___A|__NX|   0|   0|___G)
#define __PAGE_KERNEL_ROX	 (__PP|   0|   0|___A|   0|   0|   0|___G)
#define __PAGE_KERNEL_NOCACHE	 (__PP|__RW|   0|___A|__NX|___D|   0|___G| __NC)
#define __PAGE_KERNEL_VVAR	 (__PP|   0|_USR|___A|__NX|   0|   0|___G)
#define __PAGE_KERNEL_LARGE	 (__PP|__RW|   0|___A|__NX|___D|_PSE|___G)
#define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW|   0|___A|   0|___D|_PSE|___G)
#define __PAGE_KERNEL_WP	 (__PP|__RW|   0|___A|__NX|___D|   0|___G| __WP)


#define __PAGE_KERNEL_IO		__PAGE_KERNEL
#define __PAGE_KERNEL_IO_NOCACHE	__PAGE_KERNEL_NOCACHE


#ifndef __ASSEMBLY__

#define __PAGE_KERNEL_ENC	(__PAGE_KERNEL    | _ENC)
#define __PAGE_KERNEL_ENC_WP	(__PAGE_KERNEL_WP | _ENC)
#define __PAGE_KERNEL_NOENC	(__PAGE_KERNEL    |    0)
#define __PAGE_KERNEL_NOENC_WP	(__PAGE_KERNEL_WP |    0)

#define __pgprot_mask(x)	__pgprot((x) & __default_kernel_pte_mask)
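
/*
 * __default_kernel_pte_mask is set up at boot to track
 * __supported_pte_mask, so feature bits the CPU cannot use (e.g.
 * _PAGE_NX without NX support) are filtered out of the PAGE_KERNEL*
 * protections below; under page table isolation _PAGE_GLOBAL is
 * dropped as well. (A summary of behavior implemented elsewhere in
 * arch/x86/mm, not in this header.)
 */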

#define PAGE_KERNEL		__pgprot_mask(__PAGE_KERNEL            | _ENC)
#define PAGE_KERNEL_NOENC	__pgprot_mask(__PAGE_KERNEL            |    0)
#define PAGE_KERNEL_RO		__pgprot_mask(__PAGE_KERNEL_RO         | _ENC)
#define PAGE_KERNEL_EXEC	__pgprot_mask(__PAGE_KERNEL_EXEC       | _ENC)
#define PAGE_KERNEL_EXEC_NOENC	__pgprot_mask(__PAGE_KERNEL_EXEC       |    0)
#define PAGE_KERNEL_ROX		__pgprot_mask(__PAGE_KERNEL_ROX        | _ENC)
#define PAGE_KERNEL_NOCACHE	__pgprot_mask(__PAGE_KERNEL_NOCACHE    | _ENC)
#define PAGE_KERNEL_LARGE	__pgprot_mask(__PAGE_KERNEL_LARGE      | _ENC)
#define PAGE_KERNEL_LARGE_EXEC	__pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
#define PAGE_KERNEL_VVAR	__pgprot_mask(__PAGE_KERNEL_VVAR       | _ENC)

#define PAGE_KERNEL_IO		__pgprot_mask(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE	__pgprot_mask(__PAGE_KERNEL_IO_NOCACHE)

#endif	/* __ASSEMBLY__ */

/*
 * Early identity-mapping PTE attribute macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x063		/* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)

/*
 *  Extracts the flags from a (pte|pmd|pud|pgd)val_t
 *  This includes the protection key value.
 */
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
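
/*
 * Illustrative split of a PTE value using these masks (pte_flags()
 * later in this file does the flags half for real):
 *
 *	pfn   = (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
 *	flags =  pte_val(pte) & PTE_FLAGS_MASK;
 */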

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) | _PAGE_NX);
}
#define pgprot_nx pgprot_nx

#ifdef CONFIG_X86_PAE

/*
 * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't
 * use it here.
 */

#define PGD_PAE_PAGE_MASK	((signed long)PAGE_MASK)
#define PGD_PAE_PHYS_MASK	(((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)

/*
 * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
 * All other bits are reserved and must be zero (MBZ).
 */
#define PGD_ALLOWED_BITS	(PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
				 _PAGE_PWT | _PAGE_PCD | \
				 _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)

#else
/* No need to mask any bits for !PAE */
#define PGD_ALLOWED_BITS	(~0ULL)
#endif

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val & PGD_ALLOWED_BITS };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd & PGD_ALLOWED_BITS;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if CONFIG_PGTABLE_LEVELS > 4
typedef struct { p4dval_t p4d; } p4d_t;

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { val };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}
#else
#include <asm-generic/pgtable-nop4d.h>

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return native_pgd_val(p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pmdval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { .p4d.pgd = native_make_pgd(val) };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { .pmd = val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.p4d.pgd);
}
#endif

static inline p4dval_t p4d_pfn_mask(p4d_t p4d)
{
	/* No 512 GiB huge pages yet */
	return PTE_PFN_MASK;
}

static inline p4dval_t p4d_flags_mask(p4d_t p4d)
{
	return ~p4d_pfn_mask(p4d);
}

static inline p4dval_t p4d_flags(p4d_t p4d)
{
	return native_p4d_val(p4d) & p4d_flags_mask(p4d);
}

static inline pudval_t pud_pfn_mask(pud_t pud)
{
	if (native_pud_val(pud) & _PAGE_PSE)
		return PHYSICAL_PUD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
	return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & pud_flags_mask(pud);
}

static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
	if (native_pmd_val(pmd) & _PAGE_PSE)
		return PHYSICAL_PMD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
	return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define __pte2cm_idx(cb)				\
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)					\
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) |		\
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
	 (((i) & 1) << _PAGE_BIT_PWT))
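
/*
 * Worked example for the index remapping above: index 3 becomes
 * _PAGE_PCD | _PAGE_PWT (0x18) and index 4 becomes the PAT bit alone
 * (0x80); __pte2cm_idx() is the exact inverse, so
 * __pte2cm_idx(__cm_idx2pte(i)) == i for 0 <= i < 8.
 */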

unsigned long cachemode2protval(enum page_cache_mode pcm);

static inline pgprotval_t protval_4k_2_large(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	return __pgprot(protval_4k_2_large(pgprot_val(pgprot)));
}
static inline pgprotval_t protval_large_2_4k(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT_LARGE) >>
		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	return __pgprot(protval_large_2_4k(pgprot_val(pgprot)));
}
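
/*
 * Worked example for the two conversions above: in a 4K protection the
 * PAT bit is bit 7 (_PAGE_PAT, 0x80), but in 2M/1G entries bit 7 means
 * PSE, so protval_4k_2_large() moves it to bit 12 (_PAGE_PAT_LARGE,
 * 0x1000) and protval_large_2_4k() moves it back.
 */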


typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern pteval_t __default_kernel_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
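
/*
 * A usage sketch for lookup_address() (illustrative, not from this
 * file; use() is a hypothetical stand-in):
 *
 *	unsigned int level;
 *	pte_t *ptep = lookup_address(addr, &level);
 *
 *	if (ptep && (pte_flags(*ptep) & _PAGE_PRESENT))
 *		use(ptep);
 *
 * where 'level' reports the mapping size as one of the PG_LEVEL_*
 * values above.
 */
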
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
					  unsigned long address,
					  unsigned numpages,
					  unsigned long page_flags);
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
					    unsigned long numpages);
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */