/*
 * iteration_check.c: test races having to do with radix tree iteration
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/radix-tree.h>
#include <pthread.h>
#include "test.h"

#define NUM_THREADS	5
#define MAX_IDX		100
#define TAG		0
#define NEW_TAG		1

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t threads[NUM_THREADS];
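/* one rand_r() seed each for the tagged iterator, the untagged iterator and the remover */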
static unsigned int seeds[3];
static RADIX_TREE(tree, GFP_KERNEL);
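/* set by iteration_test() to tell the worker threads to wind down */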
static bool test_complete;
static int max_order;

/* relentlessly fill the tree with tagged entries */
static void *add_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		unsigned long pgoff;
		int order;

		for (pgoff = 0; pgoff < MAX_IDX; pgoff++) {
			pthread_mutex_lock(&tree_lock);
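			/* try to insert at the largest order first, falling back to smaller orders on failure */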
			for (order = max_order; order >= 0; order--) {
				if (item_insert_order(&tree, pgoff, order)
						== 0) {
					item_tag_set(&tree, pgoff, TAG);
					break;
				}
			}
			pthread_mutex_unlock(&tree_lock);
		}
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find
 * things that have been removed and randomly resetting our iteration to the
 * next chunk with radix_tree_iter_resume().  Both radix_tree_iter_retry() and
 * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
 * NULL 'slot' variable.
 */
static void *tagged_iteration_fn(void *arg)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_register_thread();

	while (!test_complete) {
		rcu_read_lock();
		radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) {
			void *entry = radix_tree_deref_slot(slot);
			if (unlikely(!entry))
				continue;

			if (radix_tree_deref_retry(entry)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			if (rand_r(&seeds[0]) % 50 == 0) {
				slot = radix_tree_iter_resume(slot, &iter);
				rcu_read_unlock();
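				/* let pending RCU callbacks (deferred frees) run before resuming the walk */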
				rcu_barrier();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Iterate over the entries, doing a radix_tree_iter_retry() as we find things
 * that have been removed and randomly resetting our iteration to the next
 * chunk with radix_tree_iter_resume().  Both radix_tree_iter_retry() and
 * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
 * NULL 'slot' variable.
 */
static void *untagged_iteration_fn(void *arg)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_register_thread();

	while (!test_complete) {
		rcu_read_lock();
		radix_tree_for_each_slot(slot, &tree, &iter, 0) {
			void *entry = radix_tree_deref_slot(slot);
			if (unlikely(!entry))
				continue;

			if (radix_tree_deref_retry(entry)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			if (rand_r(&seeds[1]) % 50 == 0) {
				slot = radix_tree_iter_resume(slot, &iter);
				rcu_read_unlock();
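				/* let pending RCU callbacks (deferred frees) run before resuming the walk */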
				rcu_barrier();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Randomly remove entries to help induce radix_tree_iter_retry() calls in the
 * two iteration functions.
 */
static void *remove_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		int pgoff;

		pgoff = rand_r(&seeds[2]) % MAX_IDX;

		pthread_mutex_lock(&tree_lock);
		item_delete(&tree, pgoff);
		pthread_mutex_unlock(&tree_lock);
	}

	rcu_unregister_thread();

	return NULL;
}

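/* relentlessly set NEW_TAG on entries in [0, MAX_IDX] that are already tagged with TAG */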
static void *tag_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		tag_tagged_items(&tree, &tree_lock, 0, MAX_IDX, 10, TAG,
					NEW_TAG);
	}
	rcu_unregister_thread();
	return NULL;
}

/* This is a unit test for a bug found by the syzkaller tester */
void iteration_test(unsigned order, unsigned test_duration)
{
	int i;

	printv(1, "Running %siteration tests for %d seconds\n",
			order > 0 ? "multiorder " : "", test_duration);

	max_order = order;
	test_complete = false;

	for (i = 0; i < 3; i++)
		seeds[i] = rand();

	if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) {
		perror("create tagged iteration thread");
		exit(1);
	}
	if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) {
		perror("create untagged iteration thread");
		exit(1);
	}
	if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) {
		perror("create add entry thread");
		exit(1);
	}
	if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) {
		perror("create remove entry thread");
		exit(1);
	}
	if (pthread_create(&threads[4], NULL, tag_entries_fn, NULL)) {
		perror("create tag entry thread");
		exit(1);
	}

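	/* let the threads race for the requested duration, then ask them to stop */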
	sleep(test_duration);
	test_complete = true;

	for (i = 0; i < NUM_THREADS; i++) {
		if (pthread_join(threads[i], NULL)) {
			perror("pthread_join");
			exit(1);
		}
	}

	item_kill_tree(&tree);
}