/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MM_KASAN_KASAN_H
#define __MM_KASAN_KASAN_H

#include <linux/atomic.h>
#include <linux/kasan.h>
#include <linux/kasan-tags.h>
#include <linux/kfence.h>
#include <linux/stackdepot.h>

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

#include <linux/static_key.h>

DECLARE_STATIC_KEY_TRUE(kasan_flag_stacktrace);

static inline bool kasan_stack_collection_enabled(void)
{
	return static_branch_unlikely(&kasan_flag_stacktrace);
}

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline bool kasan_stack_collection_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#include "../slab.h"

DECLARE_STATIC_KEY_TRUE(kasan_flag_vmalloc);

enum kasan_mode {
	KASAN_MODE_SYNC,
	KASAN_MODE_ASYNC,
	KASAN_MODE_ASYMM,
};

extern enum kasan_mode kasan_mode __ro_after_init;

extern unsigned long kasan_page_alloc_sample;
extern unsigned int kasan_page_alloc_sample_order;
DECLARE_PER_CPU(long, kasan_page_alloc_skip);

static inline bool kasan_vmalloc_enabled(void)
{
	return static_branch_likely(&kasan_flag_vmalloc);
}

static inline bool kasan_async_fault_possible(void)
{
	return kasan_mode == KASAN_MODE_ASYNC || kasan_mode == KASAN_MODE_ASYMM;
}

static inline bool kasan_sync_fault_possible(void)
{
	return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
}

static inline bool kasan_sample_page_alloc(unsigned int order)
{
	/* Fast-path for when sampling is disabled. */
	if (kasan_page_alloc_sample == 1)
		return true;

	if (order < kasan_page_alloc_sample_order)
		return true;

	if (this_cpu_dec_return(kasan_page_alloc_skip) < 0) {
		this_cpu_write(kasan_page_alloc_skip,
			       kasan_page_alloc_sample - 1);
		return true;
	}

	return false;
}
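
/*
 * Worked example of the sampling logic above (illustrative, not part of the
 * interface): assume kasan_page_alloc_sample == 4 and
 * kasan_page_alloc_sample_order == 3, as set via the kasan.page_alloc.sample
 * boot parameters. Allocations below order 3 are always checked; for order
 * >= 3 allocations on a given CPU:
 *
 *	call 1: skip drops below 0 -> reset skip to 3, return true
 *	calls 2-4: skip = 2, 1, 0 -> return false (KASAN work skipped)
 *	call 5: skip drops below 0 -> reset to 3, return true again
 *
 * i.e. roughly one in kasan_page_alloc_sample large allocations is checked,
 * using a per-CPU counter to avoid atomics on the allocation fast path.
 */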

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_async_fault_possible(void)
{
	return false;
}

static inline bool kasan_sync_fault_possible(void)
{
	return true;
}

static inline bool kasan_sample_page_alloc(unsigned int order)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC

/* Generic KASAN uses per-object metadata to store stack traces. */
static inline bool kasan_requires_meta(void)
{
	/*
	 * Technically, Generic KASAN always collects stack traces right now.
	 * However, let's use kasan_stack_collection_enabled() in case the
	 * kasan.stacktrace command-line argument is changed to affect
	 * Generic KASAN.
	 */
	return kasan_stack_collection_enabled();
}

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline bool kasan_requires_meta(void)
{
	return false;
}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE	(1UL << KASAN_SHADOW_SCALE_SHIFT)
#else
#include <asm/mte-kasan.h>
#define KASAN_GRANULE_SIZE	MTE_GRANULE_SIZE
#endif

#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)

#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_GRANULE_SIZE << PAGE_SHIFT)
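
/*
 * Worked example (illustrative): for the generic mode with
 * KASAN_SHADOW_SCALE_SHIFT == 3 and 4 KB pages, KASAN_GRANULE_SIZE is 8 bytes
 * and KASAN_MEMORY_PER_SHADOW_PAGE is 8 << 12 = 32 KB: one page of shadow,
 * at one shadow byte per granule, describes 32 KB of kernel memory.
 */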

#ifdef CONFIG_KASAN_GENERIC
#define KASAN_PAGE_FREE		0xFF  /* freed page */
#define KASAN_PAGE_REDZONE	0xFE  /* redzone for kmalloc_large allocation */
#define KASAN_SLAB_REDZONE	0xFC  /* redzone for slab object */
#define KASAN_SLAB_FREE		0xFB  /* freed slab object */
#define KASAN_VMALLOC_INVALID	0xF8  /* inaccessible space in vmap area */
#else
#define KASAN_PAGE_FREE		KASAN_TAG_INVALID
#define KASAN_PAGE_REDZONE	KASAN_TAG_INVALID
#define KASAN_SLAB_REDZONE	KASAN_TAG_INVALID
#define KASAN_SLAB_FREE		KASAN_TAG_INVALID
#define KASAN_VMALLOC_INVALID	KASAN_TAG_INVALID /* only used for SW_TAGS */
#endif

#ifdef CONFIG_KASAN_GENERIC

#define KASAN_SLAB_FREETRACK	0xFA  /* freed slab object with free track */
#define KASAN_GLOBAL_REDZONE	0xF9  /* redzone for global variable */

/* Stack redzone shadow values. Compiler ABI, do not change. */
#define KASAN_STACK_LEFT	0xF1
#define KASAN_STACK_MID		0xF2
#define KASAN_STACK_RIGHT	0xF3
#define KASAN_STACK_PARTIAL	0xF4

/* alloca redzone shadow values. */
#define KASAN_ALLOCA_LEFT	0xCA
#define KASAN_ALLOCA_RIGHT	0xCB

/* alloca redzone size. Compiler ABI, do not change. */
#define KASAN_ALLOCA_REDZONE_SIZE	32

/* Stack frame marker. Compiler ABI, do not change. */
#define KASAN_CURRENT_STACK_FRAME_MAGIC 0x41B58AB3

/* Dummy value to avoid breaking randconfig/all*config builds. */
#ifndef KASAN_ABI_VERSION
#define KASAN_ABI_VERSION 1
#endif

#endif /* CONFIG_KASAN_GENERIC */

/* Metadata layout customization. */
#define META_BYTES_PER_BLOCK 1
#define META_BLOCKS_PER_ROW 16
#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
#define META_ROWS_AROUND_ADDR 2
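
/*
 * Illustrative reading of the values above: each metadata row printed in a
 * report covers META_BYTES_PER_ROW = 16 shadow/metadata bytes, which for
 * 8-byte granules corresponds to META_MEM_BYTES_PER_ROW = 128 bytes of
 * memory, and the report shows META_ROWS_AROUND_ADDR = 2 such rows above and
 * below the row containing the bad address.
 */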

#define KASAN_STACK_DEPTH 64

struct kasan_track {
	u32 pid;
	depot_stack_handle_t stack;
};

enum kasan_report_type {
	KASAN_REPORT_ACCESS,
	KASAN_REPORT_INVALID_FREE,
	KASAN_REPORT_DOUBLE_FREE,
};

struct kasan_report_info {
	/* Filled in by kasan_report_*(). */
	enum kasan_report_type type;
	const void *access_addr;
	size_t access_size;
	bool is_write;
	unsigned long ip;

	/* Filled in by the common reporting code. */
	const void *first_bad_addr;
	struct kmem_cache *cache;
	void *object;
	size_t alloc_size;

	/* Filled in by the mode-specific reporting code. */
	const char *bug_type;
	struct kasan_track alloc_track;
	struct kasan_track free_track;
};

/* Do not change the struct layout: compiler ABI. */
struct kasan_source_location {
	const char *filename;
	int line_no;
	int column_no;
};

/* Do not change the struct layout: compiler ABI. */
struct kasan_global {
	const void *beg;		/* Address of the beginning of the global variable. */
	size_t size;			/* Size of the global variable. */
	size_t size_with_redzone;	/* Size of the variable + size of the redzone. 32 bytes aligned. */
	const void *name;		/* Name of the global variable. */
	const void *module_name;	/* Name of the module where the global variable is declared. */
	unsigned long has_dynamic_init;	/* This is needed for C++. */
#if KASAN_ABI_VERSION >= 4
	struct kasan_source_location *location;
#endif
#if KASAN_ABI_VERSION >= 5
	char *odr_indicator;
#endif
};

/* Structures for keeping alloc and free meta. */

#ifdef CONFIG_KASAN_GENERIC

struct kasan_alloc_meta {
	struct kasan_track alloc_track;
	/* Free track is stored in kasan_free_meta. */
	depot_stack_handle_t aux_stack[2];
};

struct qlist_node {
	struct qlist_node *next;
};

/*
 * Free meta is stored either in the object itself or in the redzone after the
 * object. In the former case, the free meta offset is 0. In the latter case,
 * the offset is positive and below INT_MAX; KASAN_NO_FREE_META (INT_MAX)
 * marks that free meta is not present at all.
 */
#define KASAN_NO_FREE_META INT_MAX

/*
 * Free meta is only used by the Generic mode while the object is in
 * quarantine. After that, the slab allocator stores the freelist pointer in
 * the object.
 */
struct kasan_free_meta {
	struct qlist_node quarantine_link;
	struct kasan_track free_track;
};

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

struct kasan_stack_ring_entry {
	void *ptr;
	size_t size;
	u32 pid;
	depot_stack_handle_t stack;
	bool is_free;
};

struct kasan_stack_ring {
	rwlock_t lock;
	size_t size;
	atomic64_t pos;
	struct kasan_stack_ring_entry *entries;
};
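
/*
 * Minimal sketch of how the tag-based modes use the ring (modeled on
 * mm/kasan/tags.c; details may differ by kernel version):
 *
 *	read_lock_irqsave(&stack_ring.lock, flags);
 *	pos = atomic64_fetch_add(1, &stack_ring.pos);
 *	entry = &stack_ring.entries[pos % stack_ring.size];
 *	... fill in ptr/size/pid/stack/is_free ...
 *	read_unlock_irqrestore(&stack_ring.lock, flags);
 *
 * Writers take the rwlock for reading so they can run concurrently; the
 * report code takes it for writing to get a stable snapshot of the entries.
 */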

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#ifndef kasan_shadow_to_mem
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
		<< KASAN_SHADOW_SCALE_SHIFT);
}
#endif
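
/*
 * kasan_shadow_to_mem() is the inverse of the mem-to-shadow mapping used by
 * the software modes, kasan_mem_to_shadow() in <linux/kasan.h>, which is
 * roughly:
 *
 *	shadow = ((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
 *			+ KASAN_SHADOW_OFFSET;
 *
 * so converting back shifts left by the same amount after subtracting the
 * offset.
 */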

#ifndef addr_has_metadata
static __always_inline bool addr_has_metadata(const void *addr)
{
	return (kasan_reset_tag(addr) >=
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
}
#endif

/**
 * kasan_check_range - Check a memory region, and report if the access is invalid.
 * @addr: the accessed address
 * @size: the accessed size
 * @write: true if the access is a write
 * @ret_ip: return address
 * @return: true if the access was valid, false otherwise
 */
bool kasan_check_range(const void *addr, size_t size, bool write,
				unsigned long ret_ip);
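
/*
 * Example caller (a sketch modeled on the interceptors in mm/kasan/shadow.c):
 * a memcpy wrapper validates the source as a read and the destination as a
 * write before delegating, and bails out if a check already reported a bug:
 *
 *	if (!kasan_check_range(src, len, false, _RET_IP_) ||
 *	    !kasan_check_range(dest, len, true, _RET_IP_))
 *		return NULL;
 *	return __memcpy(dest, src, len);
 */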

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static __always_inline bool addr_has_metadata(const void *addr)
{
	return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

const void *kasan_find_first_bad_addr(const void *addr, size_t size);
size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache);
void kasan_complete_mode_report_info(struct kasan_report_info *info);
void kasan_metadata_fetch_row(char *buffer, void *row);

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
void kasan_print_tags(u8 addr_tag, const void *addr);
#else
static inline void kasan_print_tags(u8 addr_tag, const void *addr) { }
#endif

#if defined(CONFIG_KASAN_STACK)
void kasan_print_address_stack_frame(const void *addr);
#else
static inline void kasan_print_address_stack_frame(const void *addr) { }
#endif

#ifdef CONFIG_KASAN_GENERIC
void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object);
#else
static inline void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object) { }
#endif

bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report_type type);

struct slab *kasan_addr_to_slab(const void *addr);

#ifdef CONFIG_KASAN_GENERIC
void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size);
void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
						const void *object);
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
						const void *object);
#else
static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size) { }
static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
#endif

depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
void kasan_set_track(struct kasan_track *track, gfp_t flags);
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
void kasan_save_free_info(struct kmem_cache *cache, void *object);

#if defined(CONFIG_KASAN_GENERIC) && \
	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
void kasan_quarantine_reduce(void);
void kasan_quarantine_remove_cache(struct kmem_cache *cache);
#else
static inline bool kasan_quarantine_put(struct kmem_cache *cache, void *object) { return false; }
static inline void kasan_quarantine_reduce(void) { }
static inline void kasan_quarantine_remove_cache(struct kmem_cache *cache) { }
#endif

#ifndef arch_kasan_set_tag
static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
{
	return addr;
}
#endif
#ifndef arch_kasan_get_tag
#define arch_kasan_get_tag(addr)	0
#endif

#define set_tag(addr, tag)	((void *)arch_kasan_set_tag((addr), (tag)))
#define get_tag(addr)		arch_kasan_get_tag(addr)
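
/*
 * Illustrative note: on arm64 with the tag-based modes, arch_kasan_set_tag()
 * places the tag in the top byte of the pointer (bits 63:56, relying on Top
 * Byte Ignore) and arch_kasan_get_tag() extracts it, so set_tag()/get_tag()
 * round-trip a tag through a pointer. The fallbacks above make both no-ops
 * on architectures without tagging support.
 */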

#ifdef CONFIG_KASAN_HW_TAGS

#define hw_enable_tag_checks_sync()		arch_enable_tag_checks_sync()
#define hw_enable_tag_checks_async()		arch_enable_tag_checks_async()
#define hw_enable_tag_checks_asymm()		arch_enable_tag_checks_asymm()
#define hw_suppress_tag_checks_start()		arch_suppress_tag_checks_start()
#define hw_suppress_tag_checks_stop()		arch_suppress_tag_checks_stop()
#define hw_force_async_tag_fault()		arch_force_async_tag_fault()
#define hw_get_random_tag()			arch_get_random_tag()
#define hw_get_mem_tag(addr)			arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag, init) \
			arch_set_mem_tag_range((addr), (size), (tag), (init))

void kasan_enable_hw_tags(void);

#else /* CONFIG_KASAN_HW_TAGS */

static inline void kasan_enable_hw_tags(void) { }

#endif /* CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
void __init kasan_init_tags(void);
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

void kasan_force_async_fault(void);

#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */

static inline void kasan_force_async_fault(void) { }

#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */

#ifdef CONFIG_KASAN_SW_TAGS
u8 kasan_random_tag(void);
#elif defined(CONFIG_KASAN_HW_TAGS)
static inline u8 kasan_random_tag(void) { return hw_get_random_tag(); }
#else
static inline u8 kasan_random_tag(void) { return 0; }
#endif

#ifdef CONFIG_KASAN_HW_TAGS

static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	hw_set_mem_tag_range((void *)addr, size, value, init);
}

static inline void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	size = round_up(size, KASAN_GRANULE_SIZE);

	hw_set_mem_tag_range((void *)addr, size, tag, init);
}
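
/*
 * Worked example (illustrative, assuming 16-byte MTE granules): unpoisoning
 * a 13-byte object retags the whole granule, so kasan_unpoison(ptr, 13)
 * behaves like kasan_unpoison(ptr, 16). kasan_poison(), by contrast, warns
 * and bails out rather than rounding when the size is not granule-aligned.
 */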

static inline bool kasan_byte_accessible(const void *addr)
{
	u8 ptr_tag = get_tag(addr);
	u8 mem_tag = hw_get_mem_tag((void *)addr);

	return ptr_tag == KASAN_TAG_KERNEL || ptr_tag == mem_tag;
}

#else /* CONFIG_KASAN_HW_TAGS */

/**
 * kasan_poison - mark the memory range as inaccessible
 * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size: range size, must be aligned to KASAN_GRANULE_SIZE
 * @value: value that's written to metadata for the range
 * @init: whether to initialize the memory range (only for hardware tag-based)
 *
 * The size gets aligned to KASAN_GRANULE_SIZE before marking the range.
 */
void kasan_poison(const void *addr, size_t size, u8 value, bool init);

/**
 * kasan_unpoison - mark the memory range as accessible
 * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size: range size, can be unaligned
 * @init: whether to initialize the memory range (only for hardware tag-based)
 *
 * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
 * marking the range.
 * For the generic mode, the last granule of the memory range gets partially
 * unpoisoned based on the @size.
 */
void kasan_unpoison(const void *addr, size_t size, bool init);

bool kasan_byte_accessible(const void *addr);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC

/**
 * kasan_poison_last_granule - mark the last granule of the memory range as
 * inaccessible
 * @address: range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size: range size
 *
 * This function is only available for the generic mode, as it's the only mode
 * that has partially poisoned memory granules.
 */
void kasan_poison_last_granule(const void *address, size_t size);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_poison_last_granule(const void *address, size_t size) { }

#endif /* CONFIG_KASAN_GENERIC */

#ifndef kasan_arch_is_ready
static inline bool kasan_arch_is_ready(void)	{ return true; }
#elif !defined(CONFIG_KASAN_GENERIC) || !defined(CONFIG_KASAN_OUTLINE)
#error kasan_arch_is_ready only works in KASAN generic outline mode!
#endif
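
/*
 * Sketch of an architecture override (modeled on powerpc's <asm/kasan.h>;
 * exact details may differ by kernel version): the arch defines the macro to
 * gate KASAN behind a flag set once the shadow is mapped, e.g.:
 *
 *	static inline bool kasan_arch_is_ready(void)
 *	{
 *		return static_branch_likely(&powerpc_kasan_enabled_key);
 *	}
 *	#define kasan_arch_is_ready kasan_arch_is_ready
 */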

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

void kasan_kunit_test_suite_start(void);
void kasan_kunit_test_suite_end(void);

#else /* CONFIG_KASAN_KUNIT_TEST */

static inline void kasan_kunit_test_suite_start(void) { }
static inline void kasan_kunit_test_suite_end(void) { }

#endif /* CONFIG_KASAN_KUNIT_TEST */

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) || IS_ENABLED(CONFIG_KASAN_MODULE_TEST)

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#endif

/*
 * Exported functions for interfaces called from assembly or from generated
 * code. Declared here to avoid warnings about missing declarations.
 */

void __asan_register_globals(void *globals, ssize_t size);
void __asan_unregister_globals(void *globals, ssize_t size);
void __asan_handle_no_return(void);
void __asan_alloca_poison(void *, ssize_t size);
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom);

void __asan_load1(void *);
void __asan_store1(void *);
void __asan_load2(void *);
void __asan_store2(void *);
void __asan_load4(void *);
void __asan_store4(void *);
void __asan_load8(void *);
void __asan_store8(void *);
void __asan_load16(void *);
void __asan_store16(void *);
void __asan_loadN(void *, ssize_t size);
void __asan_storeN(void *, ssize_t size);

void __asan_load1_noabort(void *);
void __asan_store1_noabort(void *);
void __asan_load2_noabort(void *);
void __asan_store2_noabort(void *);
void __asan_load4_noabort(void *);
void __asan_store4_noabort(void *);
void __asan_load8_noabort(void *);
void __asan_store8_noabort(void *);
void __asan_load16_noabort(void *);
void __asan_store16_noabort(void *);
void __asan_loadN_noabort(void *, ssize_t size);
void __asan_storeN_noabort(void *, ssize_t size);

void __asan_report_load1_noabort(void *);
void __asan_report_store1_noabort(void *);
void __asan_report_load2_noabort(void *);
void __asan_report_store2_noabort(void *);
void __asan_report_load4_noabort(void *);
void __asan_report_store4_noabort(void *);
void __asan_report_load8_noabort(void *);
void __asan_report_store8_noabort(void *);
void __asan_report_load16_noabort(void *);
void __asan_report_store16_noabort(void *);
void __asan_report_load_n_noabort(void *, ssize_t size);
void __asan_report_store_n_noabort(void *, ssize_t size);

void __asan_set_shadow_00(const void *addr, ssize_t size);
void __asan_set_shadow_f1(const void *addr, ssize_t size);
void __asan_set_shadow_f2(const void *addr, ssize_t size);
void __asan_set_shadow_f3(const void *addr, ssize_t size);
void __asan_set_shadow_f5(const void *addr, ssize_t size);
void __asan_set_shadow_f8(const void *addr, ssize_t size);

void *__asan_memset(void *addr, int c, ssize_t len);
void *__asan_memmove(void *dest, const void *src, ssize_t len);
void *__asan_memcpy(void *dest, const void *src, ssize_t len);

void __hwasan_load1_noabort(void *);
void __hwasan_store1_noabort(void *);
void __hwasan_load2_noabort(void *);
void __hwasan_store2_noabort(void *);
void __hwasan_load4_noabort(void *);
void __hwasan_store4_noabort(void *);
void __hwasan_load8_noabort(void *);
void __hwasan_store8_noabort(void *);
void __hwasan_load16_noabort(void *);
void __hwasan_store16_noabort(void *);
void __hwasan_loadN_noabort(void *, ssize_t size);
void __hwasan_storeN_noabort(void *, ssize_t size);

void __hwasan_tag_memory(void *, u8 tag, ssize_t size);

void *__hwasan_memset(void *addr, int c, ssize_t len);
void *__hwasan_memmove(void *dest, const void *src, ssize_t len);
void *__hwasan_memcpy(void *dest, const void *src, ssize_t len);

void kasan_tag_mismatch(void *addr, unsigned long access_info,
			unsigned long ret_ip);

#endif /* __MM_KASAN_KASAN_H */