xref: /linux/arch/arm64/include/asm/tlb.h (revision 4413e16d9d21673bb5048a2e542f1aaa00015c2e)
/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE	8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
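/*
 * Gather state used while tearing down mappings: pages to be freed are
 * queued in pages[] (initially the on-stack local[] bundle of
 * MMU_GATHER_BUNDLE entries, upgraded to a whole page of pointers by
 * __tlb_alloc_page() when possible), nr/max track how full that array is,
 * and range_start/range_end record the virtual address range that still
 * needs a TLB invalidation.  fullmm and vma are described in the comment
 * above tlb_flush() below.
 */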
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 * An illustrative usage sketch for case 1 appears at the end of this file.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

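/*
 * Widen the pending flush range to cover the page at @addr.  For a full
 * address-space teardown (fullmm) this is a no-op, since the whole mm is
 * flushed anyway.  Example (with 4K pages): after tlb_start_vma() the
 * range is the empty [TASK_SIZE, 0); adding 0x4000 makes it
 * [0x4000, 0x5000), and a later 0x9000 widens it to [0x4000, 0xa000).
 */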
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}

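/*
 * Try to replace the small on-stack bundle with a whole page worth of
 * struct page pointers so more pages can be batched per flush.  The
 * allocation is best-effort (GFP_NOWAIT); on failure we simply keep
 * using tlb->local.
 */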
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

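/*
 * Flush the TLB for the range (or mm) gathered so far, then free the
 * batched pages together with their swap cache entries.  If we are still
 * using the on-stack bundle, retry the upgrade to a full page of pointers.
 */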
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

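/*
 * Start a gather operation.  fullmm should be non-zero only when the
 * entire address space is being torn down (exit_mmap()); otherwise the
 * flush is restricted to the range recorded by tlb_add_flush().
 */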
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);
}

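/*
 * Finish the gather operation: perform the final flush, keep the page
 * table cache within bounds, and release the page-pointer array if
 * __tlb_alloc_page() managed to allocate one.
 */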
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * For tlb vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap, the vmas are adjusted to
 * only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

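/*
 * Queue a page for deferred freeing.  __tlb_remove_page() returns the
 * number of slots still available; a return of zero tells the caller the
 * batch is full and must be flushed.  tlb_remove_page() performs that
 * flush automatically.
 */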
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

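/*
 * Page table pages are freed through the same batching mechanism: the
 * covering address is added to the flush range and the table page is
 * queued with tlb_remove_page().  With 64K pages the pmd level is folded,
 * so only the pte variant is needed there.
 */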
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, pte);
}

#ifndef CONFIG_ARM64_64K_PAGES
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pmdp));
}
#endif

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

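/*
 * Illustrative sketch only -- not part of the original header, and the
 * helper name below is made up.  It shows roughly how a caller such as
 * zap_page_range() is expected to drive this interface for case 1 above
 * (unmapping a range within a single vma); the page-table walk and the
 * actual pte clearing are omitted for brevity.
 */
static inline void __tlb_example_unmap_one(struct mm_struct *mm,
					   struct vm_area_struct *vma,
					   pte_t *ptep, struct page *page,
					   unsigned long addr)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);		/* fullmm == 0: ranged unmap */
	tlb_start_vma(&tlb, vma);		/* record the vma, reset the range */
	/* ... clear the pte for @addr here ... */
	tlb_remove_tlb_entry(&tlb, ptep, addr);	/* note @addr for the later flush */
	tlb_remove_page(&tlb, page);		/* batch the page, flush when full */
	tlb_end_vma(&tlb, vma);			/* flush_tlb_range() over the range */
	tlb_finish_mmu(&tlb, addr, addr + PAGE_SIZE);
}
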
#endif