/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2008
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

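/*
 * s390 has no hugepage-only address range, and page table pages of
 * huge mappings are freed like any others.
 */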
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range

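/*
 * Out of line in arch/s390/mm/hugetlbpage.c: stores a huge pte, after
 * converting it to segment table entry format, in the pmd-level slot
 * that ptep actually points to.
 */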
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

/*
 * If the arch doesn't supply something else, assume that
 * hugepage-size-aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

#define hugetlb_prefault_arch_hook(mm)		do { } while (0)

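/*
 * Also in arch/s390/mm/hugetlbpage.c. On machines without hardware
 * large-page support (!MACHINE_HAS_HPAGE) these set up and tear down
 * the pte table that emulates a huge pte; with hardware support they
 * have nothing to do.
 */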
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);

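/*
 * A sketch of what pte_mkhuge() below does to a PROT_NONE pte,
 * assuming the bit definitions from this kernel's <asm/pgtable.h>:
 * such a pte has _PAGE_INVALID and _PAGE_SWT set, so the function
 * ORs in _HPAGE_TYPE_NONE and _SEGMENT_ENTRY_INV, clears the SW
 * bits, and sets _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO. The
 * result is a segment table entry that the hardware treats as an
 * invalid large page and that the fault path can recognize as
 * PROT_NONE.
 */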
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX; there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}

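/*
 * _PAGE_RO happens to occupy the same bit position as
 * _SEGMENT_ENTRY_RO, so setting it write-protects the entry even
 * though the pte is already in segment table entry format here.
 */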
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_RO;
	return pte;
}

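/*
 * An empty entry carries just the invalid bit; _HPAGE_TYPE_NONE
 * carries the invalid and the protection bit. Testing for
 * invalid-and-not-RO therefore keeps PROT_NONE entries from being
 * mistaken for empty ones.
 */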
static inline int huge_pte_none(pte_t pte)
{
	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
		!(pte_val(pte) & _SEGMENT_ENTRY_RO);
}

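/*
 * Without hardware large pages a "huge pte" is emulated: the segment
 * entry holds the origin of a pte table that maps the huge page with
 * normal ptes, plus the current INV/RO state. Rebuild a
 * segment-format pte from the first pte of that table and merge the
 * saved state back in.
 */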
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
		if (ptep) {
			mask = pte_val(pte) &
				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
			pte = pte_mkhuge(*ptep);
			pte_val(pte) |= mask;
		}
	}
	return pte;
}

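/*
 * Clearing only marks the mm for a deferred TLB flush
 * (context.flush_mm); the entry itself is emptied with pmd_clear().
 */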
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);

	mm->context.flush_mm = 1;
	pmd_clear((pmd_t *) ptep);
	return pte;
}

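/*
 * COMPARE AND SWAP AND PURGE: atomically replaces the low word of the
 * segment table entry with the invalid version and purges the TLBs of
 * all CPUs. The "+ 5" forms the second-operand address: + 4 selects
 * the word of the 8-byte entry that holds the invalid bit, and the
 * low-order address bit set to 1 requests a TLB (rather than ALB)
 * purge. Afterwards the whole entry is rewritten as an empty one.
 */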
static inline void __pmd_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INV;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

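/*
 * INVALIDATE DAT TABLE ENTRY (IDTE, opcode 0xb98e): given the segment
 * table origin and the virtual address, invalidates the segment table
 * entry and clears the matching TLB entries on all CPUs. Only issued
 * while the entry is still valid; the entry is then rewritten as an
 * empty one.
 */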
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
				pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
		);
	}
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

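/*
 * Invalidate a huge pte: use IDTE when the machine has it, otherwise
 * fall back to CSP. When the mm keeps a shadow page table for
 * execute protection (context.noexec), the shadow entry has to be
 * invalidated as well.
 */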
static inline void huge_ptep_invalidate(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;

	if (!MACHINE_HAS_IDTE) {
		__pmd_csp(pmdp);
		if (mm->context.noexec) {
			pmdp = get_shadow_table(pmdp);
			__pmd_csp(pmdp);
		}
		return;
	}

	__pmd_idte(address, pmdp);
	if (mm->context.noexec) {
		pmdp = get_shadow_table(pmdp);
		__pmd_idte(address, pmdp);
	}
}

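/*
 * Replacing a live segment table entry requires invalidate-then-set;
 * only do it when the new entry actually differs, and report whether
 * anything changed.
 */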
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({									    \
	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	    \
	if (__changed) {						    \
		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	    \
		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
	}								    \
	__changed;							    \
})

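/*
 * Lazy variant of wrprotect: if the mm is attached to this CPU only,
 * the immediate invalidation can be skipped and the deferred
 * flush_mm handling will catch up with the TLB later.
 */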
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = huge_ptep_get(__ptep);				\
	if (pte_write(__pte)) {						\
		(__mm)->context.flush_mm = 1;				\
		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
		    (__mm) != current->active_mm)			\
			huge_ptep_invalidate(__mm, __addr, __ptep);	\
		set_huge_pte_at(__mm, __addr, __ptep,			\
				huge_pte_wrprotect(__pte));		\
	}								\
})

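/*
 * huge_ptep_invalidate() already leaves an empty entry behind and
 * flushes the TLB, so clear-and-flush needs nothing more.
 */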
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_invalidate(vma->vm_mm, address, ptep);
}

#endif /* _ASM_S390_HUGETLB_H */