xref: /linux/mm/kasan/hw_tags.c (revision eeb9f5c2dcec90009d7cf12e780e7f9631993fc5)
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core hardware tag-based KASAN code.
 *
 * Copyright (c) 2020 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>

#include "kasan.h"

enum kasan_arg {
	KASAN_ARG_DEFAULT,
	KASAN_ARG_OFF,
	KASAN_ARG_ON,
};

enum kasan_arg_mode {
	KASAN_ARG_MODE_DEFAULT,
	KASAN_ARG_MODE_SYNC,
	KASAN_ARG_MODE_ASYNC,
	KASAN_ARG_MODE_ASYMM,
};

enum kasan_arg_vmalloc {
	KASAN_ARG_VMALLOC_DEFAULT,
	KASAN_ARG_VMALLOC_OFF,
	KASAN_ARG_VMALLOC_ON,
};

static enum kasan_arg kasan_arg __ro_after_init;
static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata;

/*
 * Whether KASAN is enabled at all.
 * The value remains false until KASAN is initialized by kasan_init_hw_tags().
 */
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL(kasan_flag_enabled);

/*
 * Whether the selected mode is synchronous, asynchronous, or asymmetric.
 * Defaults to KASAN_MODE_SYNC.
 */
enum kasan_mode kasan_mode __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_mode);

/* Whether to enable vmalloc tagging. */
#ifdef CONFIG_KASAN_VMALLOC
DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
#else
DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc);
#endif
EXPORT_SYMBOL_GPL(kasan_flag_vmalloc);

#define PAGE_ALLOC_SAMPLE_DEFAULT	1
#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT	3

/*
 * Sampling interval of page_alloc allocation (un)poisoning.
 * Defaults to 1, i.e. no sampling: every allocation is (un)poisoned.
 */
unsigned long kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;

/*
 * Minimum order of page_alloc allocations to be affected by sampling.
 * The default value is chosen to match both
 * PAGE_ALLOC_COSTLY_ORDER and SKB_FRAG_PAGE_ORDER.
 */
unsigned int kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;

DEFINE_PER_CPU(long, kasan_page_alloc_skip);

/* kasan=off/on */
static int __init early_kasan_flag(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "off"))
		kasan_arg = KASAN_ARG_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg = KASAN_ARG_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan", early_kasan_flag);

/* kasan.mode=sync/async/asymm */
static int __init early_kasan_mode(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "sync"))
		kasan_arg_mode = KASAN_ARG_MODE_SYNC;
	else if (!strcmp(arg, "async"))
		kasan_arg_mode = KASAN_ARG_MODE_ASYNC;
	else if (!strcmp(arg, "asymm"))
		kasan_arg_mode = KASAN_ARG_MODE_ASYMM;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.mode", early_kasan_mode);

/* kasan.vmalloc=off/on */
static int __init early_kasan_flag_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		return 0;

	if (!strcmp(arg, "off"))
		kasan_arg_vmalloc = KASAN_ARG_VMALLOC_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg_vmalloc = KASAN_ARG_VMALLOC_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.vmalloc", early_kasan_flag_vmalloc);

static inline const char *kasan_mode_info(void)
{
	if (kasan_mode == KASAN_MODE_ASYNC)
		return "async";
	else if (kasan_mode == KASAN_MODE_ASYMM)
		return "asymm";
	else
		return "sync";
}

/* kasan.page_alloc.sample=<sampling interval> */
static int __init early_kasan_flag_page_alloc_sample(char *arg)
{
	int rv;

	if (!arg)
		return -EINVAL;

	rv = kstrtoul(arg, 0, &kasan_page_alloc_sample);
	if (rv)
		return rv;

	if (!kasan_page_alloc_sample || kasan_page_alloc_sample > LONG_MAX) {
		kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
		return -EINVAL;
	}

	return 0;
}
early_param("kasan.page_alloc.sample", early_kasan_flag_page_alloc_sample);

/* kasan.page_alloc.sample.order=<minimum page order> */
static int __init early_kasan_flag_page_alloc_sample_order(char *arg)
{
	int rv;

	if (!arg)
		return -EINVAL;

	rv = kstrtouint(arg, 0, &kasan_page_alloc_sample_order);
	if (rv)
		return rv;

	if (kasan_page_alloc_sample_order > INT_MAX) {
		kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
		return -EINVAL;
	}

	return 0;
}
early_param("kasan.page_alloc.sample.order", early_kasan_flag_page_alloc_sample_order);

/*
 * kasan_init_hw_tags_cpu() is called for each CPU.
 * Not marked as __init as a CPU can be hot-plugged after boot.
 */
void kasan_init_hw_tags_cpu(void)
{
	/*
	 * There's no need to check that the hardware is MTE-capable here,
	 * as this function is only called for MTE-capable hardware.
	 */

	/*
	 * If KASAN is disabled via command line, don't initialize it.
	 * When this function is called, kasan_flag_enabled is not yet
	 * set by kasan_init_hw_tags(). Thus, check kasan_arg instead.
	 */
	if (kasan_arg == KASAN_ARG_OFF)
		return;

	/*
	 * Enable async or asymm modes only when explicitly requested
	 * through the command line.
	 */
	kasan_enable_hw_tags();
}

/* kasan_init_hw_tags() is called once on the boot CPU. */
void __init kasan_init_hw_tags(void)
{
	/* If hardware doesn't support MTE, don't initialize KASAN. */
	if (!system_supports_mte())
		return;

	/* If KASAN is disabled via command line, don't initialize it. */
	if (kasan_arg == KASAN_ARG_OFF)
		return;

	switch (kasan_arg_mode) {
	case KASAN_ARG_MODE_DEFAULT:
		/* Default is specified by kasan_mode definition. */
		break;
	case KASAN_ARG_MODE_SYNC:
		kasan_mode = KASAN_MODE_SYNC;
		break;
	case KASAN_ARG_MODE_ASYNC:
		kasan_mode = KASAN_MODE_ASYNC;
		break;
	case KASAN_ARG_MODE_ASYMM:
		kasan_mode = KASAN_MODE_ASYMM;
		break;
	}

	switch (kasan_arg_vmalloc) {
	case KASAN_ARG_VMALLOC_DEFAULT:
		/* Default is specified by kasan_flag_vmalloc definition. */
		break;
	case KASAN_ARG_VMALLOC_OFF:
		static_branch_disable(&kasan_flag_vmalloc);
		break;
	case KASAN_ARG_VMALLOC_ON:
		static_branch_enable(&kasan_flag_vmalloc);
		break;
	}

	kasan_init_tags();

	/* KASAN is now initialized, enable it. */
	static_branch_enable(&kasan_flag_enabled);

	pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s)\n",
		kasan_mode_info(),
		kasan_vmalloc_enabled() ? "on" : "off",
		kasan_stack_collection_enabled() ? "on" : "off");
}
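
/*
 * Illustrative example: with the defaults (CONFIG_KASAN_VMALLOC=y, no
 * kasan.* parameters, stack trace collection enabled), the pr_info() above
 * prints a boot log line along the lines of:
 *
 *	kasan: KernelAddressSanitizer initialized (hw-tags, mode=sync, vmalloc=on, stacktrace=on)
 *
 * The "kasan: " prefix comes from the pr_fmt() definition at the top of
 * this file.
 */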

#ifdef CONFIG_KASAN_VMALLOC

static void unpoison_vmalloc_pages(const void *addr, u8 tag)
{
	struct vm_struct *area;
	int i;

	/*
	 * As hardware tag-based KASAN only tags VM_ALLOC vmalloc allocations
	 * (see the comment in __kasan_unpoison_vmalloc), all of the pages
	 * should belong to a single area.
	 */
	area = find_vm_area((void *)addr);
	if (WARN_ON(!area))
		return;

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page = area->pages[i];

		page_kasan_tag_set(page, tag);
	}
}

static void init_vmalloc_pages(const void *start, unsigned long size)
{
	const void *addr;

	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(addr);

		clear_highpage_kasan_tagged(page);
	}
}

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
				kasan_vmalloc_flags_t flags)
{
	u8 tag;
	unsigned long redzone_start, redzone_size;

	if (!kasan_vmalloc_enabled()) {
		if (flags & KASAN_VMALLOC_INIT)
			init_vmalloc_pages(start, size);
		return (void *)start;
	}

	/*
	 * Don't tag non-VM_ALLOC mappings, as:
	 *
	 * 1. Unlike the software KASAN modes, hardware tag-based KASAN only
	 *    supports tagging physical memory. Therefore, it can only tag a
	 *    single mapping of normal physical pages.
	 * 2. Hardware tag-based KASAN can only tag memory mapped with special
	 *    mapping protection bits, see arch_vmap_pgprot_tagged().
	 *    As non-VM_ALLOC mappings can be mapped outside of vmalloc code,
	 *    providing these bits would require tracking all non-VM_ALLOC
	 *    mappers.
	 *
	 * Thus, for VM_ALLOC mappings, hardware tag-based KASAN only tags
	 * the first virtual mapping, which is created by vmalloc().
	 * Tagging the page_alloc memory backing that vmalloc() allocation is
	 * skipped, see ___GFP_SKIP_KASAN.
	 *
	 * For non-VM_ALLOC allocations, page_alloc memory is tagged as usual.
	 */
	if (!(flags & KASAN_VMALLOC_VM_ALLOC)) {
		WARN_ON(flags & KASAN_VMALLOC_INIT);
		return (void *)start;
	}

	/*
	 * Don't tag executable memory.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (!(flags & KASAN_VMALLOC_PROT_NORMAL)) {
		WARN_ON(flags & KASAN_VMALLOC_INIT);
		return (void *)start;
	}

	tag = kasan_random_tag();
	start = set_tag(start, tag);

	/* Unpoison and initialize memory up to size. */
	kasan_unpoison(start, size, flags & KASAN_VMALLOC_INIT);

	/*
	 * Explicitly poison and initialize the in-page vmalloc() redzone.
	 * Unlike software KASAN modes, hardware tag-based KASAN doesn't
	 * unpoison memory when populating shadow for vmalloc() space.
	 */
	redzone_start = round_up((unsigned long)start + size,
				 KASAN_GRANULE_SIZE);
	redzone_size = round_up(redzone_start, PAGE_SIZE) - redzone_start;
	kasan_poison((void *)redzone_start, redzone_size, KASAN_TAG_INVALID,
		     flags & KASAN_VMALLOC_INIT);
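
	/*
	 * Worked example (illustrative, assuming 4 KB pages and the 16-byte
	 * hardware tag granule): if start + size ends 0x28 bytes into its
	 * last page, redzone_start is rounded up to the 16-byte boundary at
	 * offset 0x30, and redzone_size = PAGE_SIZE - 0x30 = 0xfd0 bytes,
	 * so the rest of that page is poisoned with KASAN_TAG_INVALID.
	 */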

	/*
	 * Set per-page tag flags to allow accessing physical memory for the
	 * vmalloc() mapping through page_address(vmalloc_to_page()).
	 */
	unpoison_vmalloc_pages(start, tag);

	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	/*
	 * No tagging here.
	 * The physical pages backing the vmalloc() allocation are poisoned
	 * through the usual page_alloc paths.
	 */
}

#endif

void kasan_enable_hw_tags(void)
{
	if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
		hw_enable_tag_checks_async();
	else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM)
		hw_enable_tag_checks_asymm();
	else
		hw_enable_tag_checks_sync();
}

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

EXPORT_SYMBOL_GPL(kasan_enable_hw_tags);

void kasan_force_async_fault(void)
{
	hw_force_async_tag_fault();
}
EXPORT_SYMBOL_GPL(kasan_force_async_fault);

#endif