xref: /linux/arch/x86/include/asm/pgtable_64_types.h (revision b83deaa741558babf4b8d51d34f6637ccfff1b26)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_PGTABLE_64_DEFS_H
3 #define _ASM_X86_PGTABLE_64_DEFS_H
4 
5 #include <asm/sparsemem.h>
6 
7 #ifndef __ASSEMBLY__
8 #include <linux/types.h>
9 #include <asm/kaslr.h>
10 
/*
 * These are used to make use of C type-checking..
 *
 * On x86-64 every level of page-table entry is a 64-bit quantity, so all
 * the *val_t types are plain unsigned long.  pte_t wraps its value in a
 * struct so a raw integer cannot be passed where a typed entry is
 * expected (and vice versa) without an explicit conversion.
 */
typedef unsigned long	pteval_t;	/* PTE (4th/lowest level) raw value */
typedef unsigned long	pmdval_t;	/* PMD raw value */
typedef unsigned long	pudval_t;	/* PUD raw value */
typedef unsigned long	p4dval_t;	/* P4D raw value (5-level paging) */
typedef unsigned long	pgdval_t;	/* PGD (top level) raw value */
typedef unsigned long	pgprotval_t;	/* page-protection bits raw value */

typedef struct { pteval_t pte; } pte_t;
22 
#ifdef CONFIG_X86_5LEVEL
/*
 * Non-zero once 5-level paging (LA57) has been enabled; set during early
 * boot before cpu_feature_enabled() is usable.
 */
extern unsigned int __pgtable_l5_enabled;

#ifdef USE_EARLY_PGTABLE_L5
/*
 * cpu_feature_enabled() is not available in early boot code.
 * Use variable instead.
 */
static inline bool pgtable_l5_enabled(void)
{
	return __pgtable_l5_enabled;
}
#else
/* Normal (post-boot) path: query the LA57 CPU feature bit directly. */
#define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57)
#endif /* USE_EARLY_PGTABLE_L5 */

#else
/* Kernel built without 5-level support: compile-time constant false. */
#define pgtable_l5_enabled() 0
#endif /* CONFIG_X86_5LEVEL */

/*
 * Boot-time-resolved layout parameters: top-level shift and number of
 * P4D entries.  Both depend on whether 5-level paging is active (see the
 * PGDIR_SHIFT / PTRS_PER_P4D definitions below).
 */
extern unsigned int pgdir_shift;
extern unsigned int ptrs_per_p4d;
45 
46 #endif	/* !__ASSEMBLY__ */
47 
/* 64-bit kernels never share PMDs between page tables. */
#define SHARED_KERNEL_PMD	0

#ifdef CONFIG_X86_5LEVEL

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map.
 * With CONFIG_X86_5LEVEL it is not a compile-time constant: it resolves
 * to the boot-time variable pgdir_shift, because the same kernel image
 * may run with 4- or 5-level paging depending on LA57 support.
 */
#define PGDIR_SHIFT	pgdir_shift
#define PTRS_PER_PGD	512

/*
 * 4th level page in 5-level paging case
 *
 * MAX_PTRS_PER_P4D is the compile-time maximum (used for static sizing);
 * the actual entry count is the boot-time variable ptrs_per_p4d.
 */
#define P4D_SHIFT		39
#define MAX_PTRS_PER_P4D	512
#define PTRS_PER_P4D		ptrs_per_p4d
#define P4D_SIZE		(_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK		(~(P4D_SIZE - 1))

/* Physical address width ceiling with 5-level paging enabled. */
#define MAX_POSSIBLE_PHYSMEM_BITS	52

#else /* CONFIG_X86_5LEVEL */

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 * (fixed 4-level layout: 512 GiB per PGD entry; the P4D level folds
 * away, hence a single P4D entry).
 */
#define PGDIR_SHIFT		39
#define PTRS_PER_PGD		512
#define MAX_PTRS_PER_P4D	1

#endif /* CONFIG_X86_5LEVEL */
79 
/*
 * 3rd level page
 *
 * The PUD, PMD and PTE geometry below is identical for 4- and 5-level
 * paging, so these live outside the CONFIG_X86_5LEVEL conditional.
 */
#define PUD_SHIFT	30
#define PTRS_PER_PUD	512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE	512

/*
 * Sizes and masks derived from the shifts above.  _AC() attaches the UL
 * suffix only when compiled as C, keeping the constants usable from
 * assembly as well.
 */
#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))
#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
104 
/*
 * See Documentation/x86/x86_64/mm.rst for a description of the memory map.
 *
 * Be very careful vs. KASLR when changing anything here. The KASLR address
 * range must not overlap with anything except the KASAN shadow area, which
 * is correct as KASAN disables KASLR.
 */
#define MAXMEM			(1UL << MAX_PHYSMEM_BITS)

/*
 * Unused guard hole: 16 PGD entries starting at top-half PGD index -256
 * (negative values index from the top of the address space once shifted).
 */
#define GUARD_HOLE_PGD_ENTRY	-256UL
#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)

/* LDT remap area: exactly one PGD entry's worth of address space. */
#define LDT_PGD_ENTRY		-240UL
#define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR		(LDT_BASE_ADDR + PGDIR_SIZE)
122 
/*
 * vmalloc / vmemmap base addresses and the vmalloc area size for the
 * 4-level (_L4) and 5-level (_L5) paging layouts; the active variant is
 * selected by the CONFIG_DYNAMIC_MEMORY_LAYOUT block below.
 *
 * (Fix: dropped a stray space before the tab after __VMALLOC_BASE_L5 —
 * checkpatch "space before tab".)
 */
#define __VMALLOC_BASE_L4	0xffffc90000000000UL
#define __VMALLOC_BASE_L5	0xffa0000000000000UL

/* vmalloc area size, in terabytes */
#define VMALLOC_SIZE_TB_L4	32UL
#define VMALLOC_SIZE_TB_L5	12800UL

#define __VMEMMAP_BASE_L4	0xffffea0000000000UL
#define __VMEMMAP_BASE_L5	0xffd4000000000000UL
131 
#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
/*
 * Bases are boot-time variables (vmalloc_base/vmemmap_base, presumably
 * declared via <asm/kaslr.h> included above — randomized under KASLR),
 * and the vmalloc size tracks whether 5-level paging is active.
 */
# define VMALLOC_START		vmalloc_base
# define VMALLOC_SIZE_TB	(pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4)
# define VMEMMAP_START		vmemmap_base
#else
/* Static layout: always the 4-level compile-time constants. */
# define VMALLOC_START		__VMALLOC_BASE_L4
# define VMALLOC_SIZE_TB	VMALLOC_SIZE_TB_L4
# define VMEMMAP_START		__VMEMMAP_BASE_L4
#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */

/* Last byte of the vmalloc area (inclusive, hence the -1). */
#define VMALLOC_END		(VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)

/* Modules load directly above the kernel image mapping. */
#define MODULES_VADDR		(__START_KERNEL_map + KERNEL_IMAGE_SIZE)
/* The module sections ends with the start of the fixmap */
#ifndef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
# define MODULES_END		_AC(0xffffffffff000000, UL)
#else
/* Lower end leaves extra room when the kmap-local debug option is set. */
# define MODULES_END		_AC(0xfffffffffe000000, UL)
#endif
#define MODULES_LEN		(MODULES_END - MODULES_VADDR)

/* espfix64 stack area: P4D entry -2 from the top. */
#define ESPFIX_PGD_ENTRY	_AC(-2, UL)
#define ESPFIX_BASE_ADDR	(ESPFIX_PGD_ENTRY << P4D_SHIFT)

/* Per-CPU entry area: P4D entry -4 from the top. */
#define CPU_ENTRY_AREA_PGD	_AC(-4, UL)
#define CPU_ENTRY_AREA_BASE	(CPU_ENTRY_AREA_PGD << P4D_SHIFT)

/*
 * EFI runtime mapping region, in GiB units.  Note START is numerically
 * above END: the region is allocated top-down (-4 GiB down to -68 GiB).
 */
#define EFI_VA_START		( -4 * (_AC(1, UL) << 30))
#define EFI_VA_END		(-68 * (_AC(1, UL) << 30))

/* Number of statically reserved early-boot page-table pages. */
#define EARLY_DYNAMIC_PAGE_TABLES	64

/* First kernel-half PGD slot: the kernel owns the upper half of the PGD page. */
#define PGD_KERNEL_START	((PAGE_SIZE / 2) / sizeof(pgd_t))
165 
166 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
167