/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)		asm ("tlbi " #op)
#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0" : : "r" (arg))
#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
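
/*
 * For illustration, the two forms expand as follows ("addr" below is
 * just a placeholder variable, not something defined in this header):
 *
 *	__tlbi(vmalle1is)	-> __TLBI_N(vmalle1is, 1, 0)
 *				-> __TLBI_0(vmalle1is, 1)
 *				-> asm ("tlbi vmalle1is")
 *
 *	__tlbi(vae1is, addr)	-> __TLBI_N(vae1is, addr, 1, 0)
 *				-> __TLBI_1(vae1is, addr)
 *				-> asm ("tlbi vae1is, %0" : : "r" (addr))
 *
 * The trailing "1, 0" supplied by __tlbi() is what selects __TLBI_1 or
 * __TLBI_0, depending on whether a register argument was passed.
 */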

/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	local_flush_tlb_all()
 *
 *		Invalidate the entire TLB on the local CPU only.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB on all CPUs.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vm_area_struct describing address space
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vm_area_struct describing address space
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate the TLB entries for the specified range of kernel
 *		virtual addresses.
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 */
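
/*
 * Illustrative usage sketch (the helpers below are not defined in this
 * header; "vma", "addr" and "ptep" are assumed to come from the
 * caller's context).  A caller that has just cleared a live user PTE
 * and wants the stale translation dropped on all CPUs, much as the
 * generic ptep_clear_flush() helper does:
 *
 *	pte_t pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 *
 * A caller tearing down a larger span of user mappings would instead
 * issue a single flush_tlb_range(vma, start, end) once all of the
 * page-table updates are done, rather than flushing page by page.
 */
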
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}
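
/*
 * Note on the sequences above: the leading DSB (nshst/ishst) ensures
 * that any earlier page-table stores are visible before the TLBI is
 * issued, the second DSB (nsh/ish) waits for the invalidation to
 * complete, and the ISB resynchronises the instruction stream.  The
 * "nsh" (non-shareable) barriers pair with the non-broadcast vmalle1
 * operation and confine the effect to the local CPU, while the "ish"
 * (inner-shareable) barriers pair with the broadcast vmalle1is
 * operation, which is observed by all CPUs in the Inner Shareable
 * domain.
 */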

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = ASID(mm) << 48;

	dsb(ishst);
	__tlbi(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	__tlbi(vale1is, addr);
	dsb(ish);
}
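
/*
 * Operand encoding used above (and in the range routines below): for
 * the address-based TLBI operations, bits [43:0] of the register hold
 * VA[55:12] (hence "uaddr >> 12") and bits [63:48] hold the ASID
 * (hence "ASID(...) << 48").  As a made-up example, flushing user
 * address 0x7f1234567000 in an address space whose ASID is 0x2a would
 * issue TLBI VALE1IS with the operand 0x002a0007f1234567.
 */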

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
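
/*
 * With 4K pages (PAGE_SHIFT == 12) this threshold works out to
 * 1024 * 4KiB = 4MiB; with 64K pages (PAGE_SHIFT == 16) it is 64MiB.
 * Ranges larger than this are handled with a single full (ASID or
 * kernel) flush instead of more than a thousand individual TLBIs.
 */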

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm) << 48;
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level)
			__tlbi(vale1is, addr);
		else
			__tlbi(vae1is, addr);
	}
	dsb(ish);
}
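
/*
 * The loop above steps the operand by "1 << (PAGE_SHIFT - 12)" because
 * the encoded address counts in units of VA[55:12] (4KiB granules):
 * the per-page stride is therefore 1 for 4K pages, 4 for 16K pages and
 * 16 for 64K pages.  When last_level is set, only leaf entries are
 * invalidated (vale1is), leaving cached intermediate (table) entries
 * intact; this is sufficient when only the PTEs themselves have been
 * changed and the page tables are still in place.
 */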

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
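
/*
 * Kernel mappings are global rather than tagged with an ASID, so the
 * range flush above uses the all-ASIDs vaae1is operation.  Illustrative
 * caller sketch, assuming "addr" and "size" describe a region whose
 * kernel page-table entries have just been removed (this mirrors what
 * the generic vmalloc/vmap teardown code does):
 *
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 */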

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	__tlbi(vae1is, addr);
	dsb(ish);
}
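
/*
 * Because the operation above is not a "last level only" invalidation,
 * it also drops any cached copies of the intermediate (pgd/pud/pmd)
 * entries that map "uaddr", forcing the walker to re-read the updated
 * table levels.  Note that no leading barrier is issued here, so the
 * page-table update must already be visible; helpers such as set_pmd()
 * and set_pud() already include a dsb(ishst) for that purpose.
 */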

#endif /* __ASSEMBLY__ */

#endif /* __ASM_TLBFLUSH_H */