/* arch/powerpc/include/asm/pte-walk.h */
#ifndef _ASM_POWERPC_PTE_WALK_H
#define _ASM_POWERPC_PTE_WALK_H

#include <linux/sched.h>

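/*
 * __find_linux_pte() walks the page tables without taking any locks.
 * It returns a pointer to the PTE mapping @ea in @pgdir, or NULL if
 * there is none.  If @is_thp is non-NULL it is set when the returned
 * entry is a transparent huge page, and *@hshift is set to the page
 * shift when the entry maps a huge page.
 */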
/* Don't use this directly; use one of the wrappers below. */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);

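/*
 * Lookup on an arbitrary mm's @pgdir.  The caller must have interrupts
 * disabled for the walk to be safe; the VM_WARN below only reports a
 * violation, it does not make the lookup safe by itself.
 */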
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
				    bool *is_thp, unsigned *hshift)
{
	VM_WARN(!arch_irqs_disabled(), "%s called with irqs enabled\n", __func__);
	return __find_linux_pte(pgdir, ea, is_thp, hshift);
}

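/*
 * Lookup in the kernel page tables (init_mm).  Kernel mappings are
 * never transparent huge pages, so no @is_thp out-parameter is needed
 * and NULL is passed down.
 */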
static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
{
	pgd_t *pgdir = init_mm.pgd;

	return __find_linux_pte(pgdir, ea, NULL, hshift);
}

/*
 * This is what we should always use.  Any other lockless page table
 * lookup needs a careful audit against THP split.
 */
static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
					 bool *is_thp, unsigned *hshift)
{
	VM_WARN(!arch_irqs_disabled(), "%s called with irqs enabled\n", __func__);
	VM_WARN(pgdir != current->mm->pgd,
		"%s lockless page table lookup called on wrong mm\n", __func__);
	return __find_linux_pte(pgdir, ea, is_thp, hshift);
}
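
/*
 * Illustrative usage sketch (not part of this header): a typical caller
 * disables interrupts around the walk, so that a concurrent THP split
 * or page table teardown cannot complete underneath it, and consumes
 * the returned PTE before re-enabling them.  @ea here stands for an
 * arbitrary effective address in the current mm.
 *
 *	unsigned int hshift;
 *	bool is_thp;
 *	unsigned long flags;
 *	pte_t *ptep, pte;
 *
 *	local_irq_save(flags);
 *	ptep = find_current_mm_pte(current->mm->pgd, ea, &is_thp, &hshift);
 *	if (ptep)
 *		pte = READ_ONCE(*ptep);	// read while irqs are still off
 *	local_irq_restore(flags);
 */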

#endif /* _ASM_POWERPC_PTE_WALK_H */