xref: /linux/arch/riscv/mm/tlbflush.c (revision 42874e4eb35bdfc54f8514685e50434098ba4f6c)
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

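/*
 * Flush all non-global TLB entries for @asid on the local hart; with no
 * ASID in use, fall back to a full local flush instead.
 */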
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma x0, %0"
				:
				: "r" (asid)
				: "memory");
	else
		local_flush_tlb_all();
}

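/*
 * Flush the local TLB entry for the page at @addr under @asid, or for
 * @addr across all address spaces when no ASID is in use.
 */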
static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma %0, %1"
				:
				: "r" (addr), "r" (asid)
				: "memory");
	else
		local_flush_tlb_page(addr);
}

/*
 * Flush the entire TLB if the number of entries to be flushed is
 * greater than the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;

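/*
 * Flush a range one stride at a time unless the number of entries
 * exceeds tlb_flush_all_threshold, in which case a single flush of the
 * whole ASID (or of everything) is used instead.
 */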
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

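/*
 * Pick the cheapest local flush for the request: a single page, the
 * whole ASID, or a page-by-page walk of the range.
 */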
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

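/* IPI callback: flush the whole TLB on the receiving CPU. */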
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

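/*
 * Flush the TLB on every CPU, either by IPI broadcast or by asking the
 * SBI firmware to issue the remote fences.
 */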
void flush_tlb_all(void)
{
	if (riscv_use_ipi_for_rfence())
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
	else
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}

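/* Arguments handed to the range-flush IPI callback below. */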
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

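/* IPI callback: perform the requested range flush on the receiving CPU. */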
static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

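/*
 * Common helper for all range flushes. A NULL @mm means a kernel range:
 * no ASID and all online CPUs are targeted. Otherwise only the CPUs in
 * mm_cpumask(@mm) are flushed and, when the ASID allocator is enabled,
 * the flush is restricted to the mm's ASID. Remote CPUs are reached via
 * IPI or via the SBI remote fence calls.
 */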
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
			      unsigned long size, unsigned long stride)
{
	struct flush_tlb_range_data ftd;
	const struct cpumask *cmask;
	unsigned long asid = FLUSH_TLB_NO_ASID;
	bool broadcast;

	if (mm) {
		unsigned int cpuid;

		cmask = mm_cpumask(mm);
		if (cpumask_empty(cmask))
			return;

		cpuid = get_cpu();
		/* check if the tlbflush needs to be sent to other CPUs */
		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;

		if (static_branch_unlikely(&use_asid_allocator))
			asid = atomic_long_read(&mm->context.id) & asid_mask;
	} else {
		cmask = cpu_online_mask;
		broadcast = true;
	}

	if (broadcast) {
		if (riscv_use_ipi_for_rfence()) {
			ftd.asid = asid;
			ftd.start = start;
			ftd.size = size;
			ftd.stride = stride;
			on_each_cpu_mask(cmask,
					 __ipi_flush_tlb_range_asid,
					 &ftd, 1);
		} else
			sbi_remote_sfence_vma_asid(cmask,
						   start, size, asid);
	} else {
		local_flush_tlb_range_asid(start, size, stride, asid);
	}

	if (mm)
		put_cpu();
}

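/* Flush every user mapping belonging to @mm. */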
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

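/* Flush [start, end) of @mm, using @page_size as the flush stride. */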
void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm, start, end - start, page_size);
}

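/* Flush the single page mapped at @addr in @vma's address space. */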
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

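/*
 * Flush a VMA range. Hugetlb mappings use the huge page size as the
 * stride; with Svnapot the stride is rounded down to a page table level
 * size so that every PTE backing a NAPOT region is invalidated (see the
 * comment in the body below).
 */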
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long stride_size;

	if (!is_vm_hugetlb_page(vma)) {
		stride_size = PAGE_SIZE;
	} else {
		stride_size = huge_page_size(hstate_vma(vma));

		/*
		 * As stated in the privileged specification, every PTE in a
		 * NAPOT region must be invalidated, so reset the stride in that
		 * case.
		 */
		if (has_svnapot()) {
			if (stride_size >= PGDIR_SIZE)
				stride_size = PGDIR_SIZE;
			else if (stride_size >= P4D_SIZE)
				stride_size = P4D_SIZE;
			else if (stride_size >= PUD_SIZE)
				stride_size = PUD_SIZE;
			else if (stride_size >= PMD_SIZE)
				stride_size = PMD_SIZE;
			else
				stride_size = PAGE_SIZE;
		}
	}

	__flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
}

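/* Flush a range of kernel mappings on all online CPUs, with no ASID. */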
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
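/* Flush PMD-level (huge page) mappings for transparent huge pages. */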
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif