// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <uapi/linux/idxd.h>
#include <linux/idxd.h>
#include <linux/dmaengine.h>
#include "../../dma/idxd/idxd.h"
#include <linux/debugfs.h>
#include <crypto/internal/acompress.h>
#include "iaa_crypto.h"
#include "iaa_crypto_stats.h"

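/*
 * Global driver-wide counters.  They are exposed through the iaa_crypto
 * debugfs directory created below and are cleared together by a write to
 * the stats_reset file.
 */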
static u64 total_comp_calls;
static u64 total_decomp_calls;
static u64 total_sw_decomp_calls;
static u64 max_comp_delay_ns;
static u64 max_decomp_delay_ns;
static u64 max_acomp_delay_ns;
static u64 max_adecomp_delay_ns;
static u64 total_comp_bytes_out;
static u64 total_decomp_bytes_in;
static u64 total_completion_einval_errors;
static u64 total_completion_timeout_errors;
static u64 total_completion_comp_buf_overflow_errors;

static struct dentry *iaa_crypto_debugfs_root;

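/*
 * Accumulation helpers called from the main driver paths; they are
 * declared in iaa_crypto_stats.h.  No locking is done here, so the
 * counters are best-effort debug statistics rather than exact counts.
 */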
void update_total_comp_calls(void)
{
	total_comp_calls++;
}

void update_total_comp_bytes_out(int n)
{
	total_comp_bytes_out += n;
}

void update_total_decomp_calls(void)
{
	total_decomp_calls++;
}

void update_total_sw_decomp_calls(void)
{
	total_sw_decomp_calls++;
}

void update_total_decomp_bytes_in(int n)
{
	total_decomp_bytes_in += n;
}

void update_completion_einval_errs(void)
{
	total_completion_einval_errors++;
}

void update_completion_timeout_errs(void)
{
	total_completion_timeout_errors++;
}

void update_completion_comp_buf_overflow_errs(void)
{
	total_completion_comp_buf_overflow_errors++;
}

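/*
 * Track the worst-case latency seen for each operation type.  The caller
 * passes the ktime_get_ns() timestamp taken when the operation started.
 */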
void update_max_comp_delay_ns(u64 start_time_ns)
{
	u64 time_diff;

	time_diff = ktime_get_ns() - start_time_ns;

	if (time_diff > max_comp_delay_ns)
		max_comp_delay_ns = time_diff;
}

void update_max_decomp_delay_ns(u64 start_time_ns)
{
	u64 time_diff;

	time_diff = ktime_get_ns() - start_time_ns;

	if (time_diff > max_decomp_delay_ns)
		max_decomp_delay_ns = time_diff;
}

void update_max_acomp_delay_ns(u64 start_time_ns)
{
	u64 time_diff;

	time_diff = ktime_get_ns() - start_time_ns;

	if (time_diff > max_acomp_delay_ns)
		max_acomp_delay_ns = time_diff;
}

void update_max_adecomp_delay_ns(u64 start_time_ns)
{
	u64 time_diff;

	time_diff = ktime_get_ns() - start_time_ns;

	if (time_diff > max_adecomp_delay_ns)
		max_adecomp_delay_ns = time_diff;
}

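/*
 * Per-wq statistics.  Each update is also accumulated into the owning
 * IAA device so that device_stats_show() can report both levels.
 */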
void update_wq_comp_calls(struct idxd_wq *idxd_wq)
{
	struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

	wq->comp_calls++;
	wq->iaa_device->comp_calls++;
}

void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n)
{
	struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

	wq->comp_bytes += n;
	wq->iaa_device->comp_bytes += n;
}

void update_wq_decomp_calls(struct idxd_wq *idxd_wq)
{
	struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

	wq->decomp_calls++;
	wq->iaa_device->decomp_calls++;
}

void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n)
{
	struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

	wq->decomp_bytes += n;
	wq->iaa_device->decomp_bytes += n;
}

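/* Reset helpers backing the stats_reset debugfs file. */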
static void reset_iaa_crypto_stats(void)
{
	total_comp_calls = 0;
	total_decomp_calls = 0;
	total_sw_decomp_calls = 0;
	max_comp_delay_ns = 0;
	max_decomp_delay_ns = 0;
	max_acomp_delay_ns = 0;
	max_adecomp_delay_ns = 0;
	total_comp_bytes_out = 0;
	total_decomp_bytes_in = 0;
	total_completion_einval_errors = 0;
	total_completion_timeout_errors = 0;
	total_completion_comp_buf_overflow_errors = 0;
}

static void reset_wq_stats(struct iaa_wq *wq)
{
	wq->comp_calls = 0;
	wq->comp_bytes = 0;
	wq->decomp_calls = 0;
	wq->decomp_bytes = 0;
}

static void reset_device_stats(struct iaa_device *iaa_device)
{
	struct iaa_wq *iaa_wq;

	iaa_device->comp_calls = 0;
	iaa_device->comp_bytes = 0;
	iaa_device->decomp_calls = 0;
	iaa_device->decomp_bytes = 0;

	list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
		reset_wq_stats(iaa_wq);
}

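/* seq_file output helpers for the wq_stats debugfs file. */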
static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq)
{
	seq_printf(m, "    name: %s\n", iaa_wq->wq->name);
	seq_printf(m, "    comp_calls: %llu\n", iaa_wq->comp_calls);
	seq_printf(m, "    comp_bytes: %llu\n", iaa_wq->comp_bytes);
	seq_printf(m, "    decomp_calls: %llu\n", iaa_wq->decomp_calls);
	seq_printf(m, "    decomp_bytes: %llu\n\n", iaa_wq->decomp_bytes);
}

static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
{
	struct iaa_wq *iaa_wq;

	seq_puts(m, "iaa device:\n");
	seq_printf(m, "  id: %d\n", iaa_device->idxd->id);
	seq_printf(m, "  n_wqs: %d\n", iaa_device->n_wq);
	seq_printf(m, "  comp_calls: %llu\n", iaa_device->comp_calls);
	seq_printf(m, "  comp_bytes: %llu\n", iaa_device->comp_bytes);
	seq_printf(m, "  decomp_calls: %llu\n", iaa_device->decomp_calls);
	seq_printf(m, "  decomp_bytes: %llu\n", iaa_device->decomp_bytes);
	seq_puts(m, "  wqs:\n");

	list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
		wq_show(m, iaa_wq);
}

static void global_stats_show(struct seq_file *m)
{
	seq_puts(m, "global stats:\n");
	seq_printf(m, "  total_comp_calls: %llu\n", total_comp_calls);
	seq_printf(m, "  total_decomp_calls: %llu\n", total_decomp_calls);
	seq_printf(m, "  total_sw_decomp_calls: %llu\n", total_sw_decomp_calls);
	seq_printf(m, "  total_comp_bytes_out: %llu\n", total_comp_bytes_out);
	seq_printf(m, "  total_decomp_bytes_in: %llu\n", total_decomp_bytes_in);
	seq_printf(m, "  total_completion_einval_errors: %llu\n",
		   total_completion_einval_errors);
	seq_printf(m, "  total_completion_timeout_errors: %llu\n",
		   total_completion_timeout_errors);
	seq_printf(m, "  total_completion_comp_buf_overflow_errors: %llu\n\n",
		   total_completion_comp_buf_overflow_errors);
}

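/*
 * Dump the global counters followed by per-device and per-wq stats,
 * walking the iaa_devices list under iaa_devices_lock.
 */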
static int wq_stats_show(struct seq_file *m, void *v)
{
	struct iaa_device *iaa_device;

	mutex_lock(&iaa_devices_lock);

	global_stats_show(m);

	list_for_each_entry(iaa_device, &iaa_devices, list)
		device_stats_show(m, iaa_device);

	mutex_unlock(&iaa_devices_lock);

	return 0;
}

static int iaa_crypto_stats_reset(void *data, u64 value)
{
	struct iaa_device *iaa_device;

	reset_iaa_crypto_stats();

	mutex_lock(&iaa_devices_lock);

	list_for_each_entry(iaa_device, &iaa_devices, list)
		reset_device_stats(iaa_device);

	mutex_unlock(&iaa_devices_lock);

	return 0;
}

static int wq_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, wq_stats_show, file);
}

static const struct file_operations wq_stats_fops = {
	.open = wq_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

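/* Writing any value to the stats_reset file clears all of the counters. */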
DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n");

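/*
 * Create the iaa_crypto debugfs directory (typically visible under
 * /sys/kernel/debug/iaa_crypto/) and populate it with the individual
 * counter attributes plus the wq_stats and stats_reset files.
 */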
int __init iaa_crypto_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL);
	if (!iaa_crypto_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("max_comp_delay_ns", 0644,
			   iaa_crypto_debugfs_root, &max_comp_delay_ns);
	debugfs_create_u64("max_decomp_delay_ns", 0644,
			   iaa_crypto_debugfs_root, &max_decomp_delay_ns);
	debugfs_create_u64("max_acomp_delay_ns", 0644,
			   iaa_crypto_debugfs_root, &max_acomp_delay_ns);
	debugfs_create_u64("max_adecomp_delay_ns", 0644,
			   iaa_crypto_debugfs_root, &max_adecomp_delay_ns);
	debugfs_create_u64("total_comp_calls", 0644,
			   iaa_crypto_debugfs_root, &total_comp_calls);
	debugfs_create_u64("total_decomp_calls", 0644,
			   iaa_crypto_debugfs_root, &total_decomp_calls);
	debugfs_create_u64("total_sw_decomp_calls", 0644,
			   iaa_crypto_debugfs_root, &total_sw_decomp_calls);
	debugfs_create_u64("total_comp_bytes_out", 0644,
			   iaa_crypto_debugfs_root, &total_comp_bytes_out);
	debugfs_create_u64("total_decomp_bytes_in", 0644,
			   iaa_crypto_debugfs_root, &total_decomp_bytes_in);
	debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL,
			    &wq_stats_fops);
	debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL,
			    &wq_stats_reset_fops);

	return 0;
}

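/* Remove the entire iaa_crypto debugfs tree on module exit. */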
void __exit iaa_crypto_debugfs_cleanup(void)
{
	debugfs_remove_recursive(iaa_crypto_debugfs_root);
}

MODULE_LICENSE("GPL");