/linux/block/blk-stat.c (revision 3ad0876554cafa368f574d4d408468510543e9ff)
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

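/*
 * Per-queue stats state: the list of registered callbacks, the
 * spinlock protecting it, and a flag that keeps stats enabled even
 * when no callbacks are registered (see blk_stat_enable_accounting()).
 */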
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

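/*
 * Reset a bucket.  ->min starts at all-ones so the first recorded
 * sample always replaces it; ->batch accumulates the raw sum of
 * samples until the bucket is folded by blk_stat_sum().
 */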
static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/*
 * Fold a per-cpu bucket into an aggregate one.  ->mean is never
 * maintained on the per-cpu side; ->batch carries the raw sum of the
 * samples instead, so the combined mean is rebuilt from src->batch
 * plus dst's already-averaged data.
 */
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

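/*
 * Record one sample in a bucket.  The caller has pinned the CPU with
 * get_cpu_ptr(), so this runs with preemption disabled.
 */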
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

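/*
 * Called on request completion: compute how long the request took from
 * issue to completion, feed that to the throttling code, and record it
 * in the matching per-cpu bucket of every active callback.  The
 * callback list is walked under rcu_read_lock(), pairing with the RCU
 * list primitives used on the add/remove side.
 */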
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	u64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
		__blk_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);
	}
	rcu_read_unlock();
}

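/*
 * Timer handler: fold every CPU's buckets into cb->stat, reset the
 * per-cpu copies, and hand the aggregated buckets to the user's
 * timer_fn.  Note the asymmetry with blk_stat_add_callback(): only
 * online CPUs are summed here, while registration initializes all
 * possible CPUs.
 */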
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

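/**
 * blk_stat_alloc_callback() - Allocate a block statistics callback.
 * @timer_fn: invoked from the timer with the aggregated bucket stats
 * @bucket_fn: maps a request to a bucket index; a negative return
 *	value means the request is skipped
 * @buckets: number of buckets
 * @data: opaque value for the caller's private use
 *
 * The callback collects nothing until it is registered with
 * blk_stat_add_callback() and armed with one of the activation helpers
 * declared in blk-stat.h, e.g. blk_stat_activate_msecs().  A rough
 * usage sketch (error handling omitted; all names outside the
 * blk_stat_*() API are hypothetical):
 *
 *	cb = blk_stat_alloc_callback(my_timer_fn, my_bucket_fn, 2, priv);
 *	blk_stat_add_callback(q, cb);
 *	blk_stat_activate_msecs(cb, 10);
 *	...
 *	blk_stat_remove_callback(q, cb);
 *	blk_stat_free_callback(cb);
 *
 * Return: the allocated callback, or NULL on allocation failure.
 */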
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);

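/*
 * Register @cb on @q and enable stats collection for the queue.  All
 * possible CPUs' buckets are reset first, so stale samples from a
 * previous registration cannot leak into the new one.
 */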
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

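/*
 * Unlink @cb from @q and, unless blk_stat_enable_accounting() pinned
 * stats on, clear QUEUE_FLAG_STATS once the last callback is gone.
 * del_timer_sync() guarantees timer_fn is not running on return, but
 * blk_stat_add() may still see @cb until an RCU grace period passes,
 * which is why freeing is deferred through call_rcu() below.
 */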
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

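/* RCU-deferred teardown; see blk_stat_remove_callback() above. */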
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

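/*
 * Permanently enable stats for @q, for users (e.g. blk-throttle) that
 * consume request timing without registering a callback.  Nothing in
 * this file ever clears ->enable_accounting again.
 */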
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}

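/* Allocate and initialize the per-queue stats bookkeeping. */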
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

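/*
 * Free the per-queue stats bookkeeping.  Every callback must already
 * have been removed, hence the WARN_ON() on a non-empty list.
 */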
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}