xref: /linux/kernel/audit_tree.c (revision 60063497a95e716c9a689af3be2687d261f115b4)
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */

static struct fsnotify_group *audit_tree_group;

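/*
 * Allocate an audit_tree together with its pathname in a single
 * allocation; pathname[] is a flexible array member, hence the
 * strlen(s) + 1 in the size.  Returns with a refcount of 1.
 */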
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

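/*
 * Release the chunk itself.  Each non-NULL owners[].owner slot holds
 * a tree reference taken at tag time, so those are dropped first.
 */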
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

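/*
 * Allocate a chunk with room for 'count' owner slots (another
 * flexible array member); .refs starts at 1 for the caller.  The
 * embedded fsnotify mark is set up so that its destruction callback,
 * audit_tree_destroy_watch(), defers the final free to RCU via
 * __put_chunk().
 */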
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

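/*
 * Hash on the inode address.  Dividing by L1_CACHE_BYTES discards the
 * low bits, which carry little information for slab-allocated inodes,
 * before reducing modulo HASH_SIZE.
 */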
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock and entry->lock are held by the caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->i.inode)
		return;
	list = chunk_hash(entry->i.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.i.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

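/*
 * Map a node back to its containing chunk: node.index (sans the
 * "will prune" bit in the MSB) is the slot number, so stepping back
 * that many entries lands on owners[0] and container_of() does the
 * rest.
 */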
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

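/*
 * Detach one owner from a chunk.  The owner array is
 * replace-on-write: allocate a chunk one slot smaller, copy the
 * surviving owners across and swap the fsnotify marks; a chunk that
 * would become empty is killed outright.  If the smaller allocation
 * fails, fall back to leaving a NULL hole in the old chunk (see the
 * Fallback: label below).
 *
 * Called with hash_lock held; drops and retakes it.
 */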
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->i.inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry);
		fsnotify_put_mark(entry);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry);
	fsnotify_put_mark(entry);
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

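/*
 * First tag on an inode: allocate a single-owner chunk, attach its
 * mark to the inode and hook it into the tree and the hash.  The
 * owner slot is born with the "will prune" bit set; it is cleared
 * once the whole tagging pass succeeds (see audit_add_tree_rule()).
 */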
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	return 0;
}

/* the first tagged inode becomes root of the tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->i.inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		free_chunk(chunk);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could NEVER have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry);
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find_inode_mark() */
	fsnotify_put_mark(old_entry); /* and kill it */
	return 0;
}

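/*
 * Detach every rule that refers to this tree, emitting an
 * AUDIT_CONFIG_CHANGE record for each fully-instantiated one.
 * Caller holds audit_filter_mutex.
 */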
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=");
			audit_log_string(ab, "remove rule");
			audit_log_format(ab, " dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			audit_log_key(ab, rule->filterkey);
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

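/*
 * Chunks whose node.index still has the "will prune" bit set are
 * uncommitted.  Partition the list so the marked ones come first,
 * then untag until an unmarked node is reached; if that leaves the
 * tree rootless, kill it outright.
 */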
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}

void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (!root_mnt)
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.i.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		put_tree(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

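/*
 * Called from rule parsing: a tree rule must be an absolute path,
 * live on the exit filter list, use an equality comparison and not
 * be combined with an inode, watch or another tree filter.
 */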
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

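/*
 * Re-tag trees after a mount move: every tree whose path contains
 * 'old' gets the mount set collected at 'new' tagged into it.  The
 * cursor/barrier pair lets us walk tree_list while repeatedly
 * dropping audit_filter_mutex: cursor marks where the walk is,
 * barrier separates already-processed trees (moved behind it) from
 * the rest.
 */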
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (!tagged)
		return -ENOMEM;

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}

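/*
 * Fire-and-forget: spawn a kthread to drain prune_list.  Note that a
 * kthread_run() failure is silently ignored here; victims would then
 * sit on prune_list until the next prune gets scheduled.
 */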
static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of the syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}

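/*
 * These marks never ask for any events (should_send_event() below
 * always says no), so handle_event() should be unreachable; BUG() if
 * we ever get here.
 */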
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   struct fsnotify_event *event)
{
	BUG();
	return -EOPNOTSUPP;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);
	fsnotify_put_mark(entry);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
				  struct fsnotify_mark *inode_mark,
				  struct fsnotify_mark *vfsmount_mark,
				  __u32 mask, void *data, int data_type)
{
	return false;
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.should_send_event = audit_tree_send_event,
	.free_group_priv = NULL,
	.free_event_priv = NULL,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);