/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
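
/*
 * For illustration only (not part of this header's API): a driver wanting
 * an uncached view of device registers might build its protection as
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *
 * On anything newer than a 386 this ORs in the UC- cache mode; on a
 * 386-class CPU (boot_cpu_data.x86 == 3), which cannot express these cache
 * attributes, the protection is returned unchanged.
 */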

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
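
/*
 * For illustration only: with SME active, __sme_set() ORs the C-bit
 * (sme_me_mask) into the protection value and __sme_clr() removes it, so a
 * mapping that must be shared unencrypted with a device could be created
 * roughly like
 *
 *	pgprot_t prot = pgprot_decrypted(PAGE_KERNEL);
 *
 * When SME is disabled, sme_me_mask is 0 and both macros are no-ops.
 */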

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/api.h>
#include <asm-generic/pgtable_uffd.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) ((void)(vaddr), virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)
#endif	/* CONFIG_PARAVIRT_XXL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return rdpkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	struct pkru_state *pk;

	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return;

	pk = get_xsave_addr(&current->thread.fpu.state.xsave, XFEATURE_PKRU);

	/*
	 * The PKRU value in xstate needs to be in sync with the value that is
	 * written to the CPU. The FPU restore on return to userland would
	 * otherwise load the previous value again.
	 */
	fpregs_lock();
	if (pk)
		pk->pkru = pkru;
	__write_pkru(pkru);
	fpregs_unlock();
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);

	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}
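
/*
 * For illustration only: on CPUs affected by L1TF, PROT_NONE entries keep
 * the present bit clear but store the PFN bits inverted, so a stale
 * non-present entry cannot be abused to speculatively load an
 * attacker-chosen physical address. protnone_mask() returns all ones for
 * such entries and zero otherwise; XOR-ing with it therefore undoes the
 * inversion on read (here) and applies it on write (pfn_pte() and friends
 * below).
 */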

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);

	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);

	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define p4d_leaf	p4d_large
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* NOTE: when testing for a huge page, also consider pmd_devmap(), or use pmd_large() */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
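
/*
 * For illustration only: a devmap PMD (e.g. device-DAX) also has _PAGE_PSE
 * set, so pmd_large() is true for both, while pmd_trans_huge() is true
 * only for anonymous/file THP:
 *
 *	THP PMD:	_PAGE_PSE		-> pmd_trans_huge() == 1
 *	devmap PMD:	_PAGE_PSE|_PAGE_DEVMAP	-> pmd_trans_huge() == 0
 */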

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_UFFD_WP;
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_UFFD_WP);
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pmd_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_UFFD_WP;
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_UFFD_WP);
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
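
/*
 * For illustration only: on a CPU without NX support, __supported_pte_mask
 * has _PAGE_NX clear, so for a present pgprot
 *
 *	massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_NX))
 *
 * returns just _PAGE_PRESENT | _PAGE_RW, while a non-present (e.g. swap)
 * encoding is passed through untouched.
 */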

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h cannot be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;

	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;

	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;

	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}
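
/*
 * For illustration only: pte_modify() keeps the bits in _PAGE_CHG_MASK
 * (the PFN plus state such as dirty/accessed, PAT, encryption and
 * soft-dirty) and replaces everything else with the new protection, so
 * e.g. mprotect(PROT_READ) on a dirty, writable page yields a PTE with the
 * same PFN, _PAGE_DIRTY still set and _PAGE_RW cleared.
 */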

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/*
 * mprotect needs to preserve PAT and encryption bits when updating
 * vm_page_prot
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;

	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return canon_pgprot(prot);
}

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtypes:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user PGD and returns the resulting PGD that must be set in
 * the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
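
/*
 * For illustration only: with PTI enabled, a write of a user-visible PGD
 * entry is expected to go through this helper so that the user copy of the
 * top-level table stays in sync, e.g.
 *
 *	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 *
 * On CPUs without X86_FEATURE_PTI the value passes through unmodified.
 */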
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
	    mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}
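
/*
 * For illustration only: pmdp_invalidate() (via pmd_mkinvalid() above)
 * transiently produces a huge PMD with _PAGE_PRESENT clear but _PAGE_PSE
 * still set; counting _PAGE_PSE as "present" here keeps concurrent lookups
 * from mistaking that transient state for pmd_none().
 */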

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check the low word on 32-bit platforms, since it might be
	 * out of sync with the upper half.
	 */
	unsigned long val = native_pmd_val(pmd);

	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
extern pgd_t trampoline_pgd_entry;

void __init poking_init(void);

unsigned long init_memory_mapping(unsigned long start,
				  unsigned long end, pgprot_t prot);
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);

	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;

	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;

		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif
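
/*
 * For illustration only: the xchg() in the SMP case above is what makes
 * pmdp_establish() safe against a concurrent hardware A/D-bit update --
 * the old PMD, including any dirty bit set in the meantime, is returned
 * atomically with the store of the new one.
 */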

/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}
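
/*
 * For illustration only: on x86-64 with 4k pages, PGD_KERNEL_START is
 * PTRS_PER_PGD / 2 == 256, so an entry at page offset 0x7f8 (slot 255)
 * maps userspace while slot 256 and above map the kernel half.
 */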

#define pgd_leaf	pgd_large
static inline int pgd_large(pgd_t pgd) { return 0; }

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one occupies the first 4k and
 * the user one the last 4k.  To switch between them, you just need to
 * flip bit 12 (PAGE_SHIFT) in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT
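
/*
 * For illustration only: with an 8k-aligned PGD allocation at, say,
 * 0x...c000, the kernel half lives at 0x...c000 and the user half at
 * 0x...d000; kernel_to_user_pgdp() below simply sets bit 12:
 *
 *	0x...c000 | (1UL << PTI_PGTABLE_SWITCH_BIT) == 0x...d000
 */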

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}

static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - same, the source pgd range
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}
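
/*
 * For illustration only: a typical caller clones the kernel half of a new
 * process's PGD from the reference tables, along the lines of
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */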

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}

static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
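
/*
 * For illustration only, with 4k pages (PAGE_SHIFT == 12, PTE_SHIFT == 9):
 *
 *	page_level_shift(PG_LEVEL_2M) == 21, so
 *	page_level_size(PG_LEVEL_2M)  == 2 MiB and
 *	page_level_mask(PG_LEVEL_2M)  == ~0x1fffffUL
 */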

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
}

static inline int pmd_swp_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2
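
/*
 * For illustration only: PKRU holds two bits per protection key, so the
 * state for pkey 1 sits at bits 2-3, and e.g. a PKRU value of 0x8
 * (PKRU_WD_BIT << 2) leaves pkey-1 pages readable but not writable --
 * exactly what __pkru_allows_read()/__pkru_allows_write() below decode.
 */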

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
extern u32 init_pkru_value;
#else
#define init_pkru_value	0
#endif

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;

	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;

	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return false;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#define arch_faults_on_old_pte arch_faults_on_old_pte
static inline bool arch_faults_on_old_pte(void)
{
	return false;
}

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */