xref: /linux/arch/x86/events/amd/iommu.c (revision fcc8487d477a3452a1d0ccbdd4c5e0e1e3cb8bed)
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"perf/amd_iommu: " fmt

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

#include "../perf_event.h"
#include "iommu.h"

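/*
 * The IOMMU performance counters are 48 bits wide, so deltas are
 * computed in the top 16 bits of a u64 (COUNTER_SHIFT = 64 - 48),
 * letting two's-complement arithmetic absorb counter wraparound.
 */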
#define COUNTER_SHIFT		16

/* iommu pmu conf masks */
#define GET_CSOURCE(x)     ((x)->conf & 0xFFULL)
#define GET_DEVID(x)       (((x)->conf >> 8)  & 0xFFFFULL)
#define GET_DOMID(x)       (((x)->conf >> 24) & 0xFFFFULL)
#define GET_PASID(x)       (((x)->conf >> 40) & 0xFFFFFULL)

/* iommu pmu conf1 masks */
#define GET_DEVID_MASK(x)  ((x)->conf1  & 0xFFFFULL)
#define GET_DOMID_MASK(x)  (((x)->conf1 >> 16) & 0xFFFFULL)
#define GET_PASID_MASK(x)  (((x)->conf1 >> 32) & 0xFFFFFULL)

#define IOMMU_NAME_SIZE 16

struct perf_amd_iommu {
	struct list_head list;
	struct pmu pmu;
	struct amd_iommu *iommu;
	char name[IOMMU_NAME_SIZE];
	u8 max_banks;
	u8 max_counters;
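	/* One bit per bank/counter slot (4 slots per bank); protected by lock */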
	u64 cntr_assign_mask;
	raw_spinlock_t lock;
};

static LIST_HEAD(perf_amd_iommu_list);

/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,    "config:0-7");
PMU_FORMAT_ATTR(devid,      "config:8-23");
PMU_FORMAT_ATTR(domid,      "config:24-39");
PMU_FORMAT_ATTR(pasid,      "config:40-59");
PMU_FORMAT_ATTR(devid_mask, "config1:0-15");
PMU_FORMAT_ATTR(domid_mask, "config1:16-31");
PMU_FORMAT_ATTR(pasid_mask, "config1:32-51");

static struct attribute *iommu_format_attrs[] = {
	&format_attr_csource.attr,
	&format_attr_devid.attr,
	&format_attr_pasid.attr,
	&format_attr_domid.attr,
	&format_attr_devid_mask.attr,
	&format_attr_pasid_mask.attr,
	&format_attr_domid_mask.attr,
	NULL,
};

static struct attribute_group amd_iommu_format_group = {
	.name = "format",
	.attrs = iommu_format_attrs,
};

/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
static struct attribute_group amd_iommu_events_group = {
	.name = "events",
};

struct amd_iommu_event_desc {
	struct kobj_attribute attr;
	const char *event;
};

static ssize_t _iommu_event_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct amd_iommu_event_desc *event =
		container_of(attr, struct amd_iommu_event_desc, attr);
	return sprintf(buf, "%s\n", event->event);
}

#define AMD_IOMMU_EVENT_DESC(_name, _event)			\
{								\
	.attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),	\
	.event = _event,					\
}

static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = {
	AMD_IOMMU_EVENT_DESC(mem_pass_untrans,        "csource=0x01"),
	AMD_IOMMU_EVENT_DESC(mem_pass_pretrans,       "csource=0x02"),
	AMD_IOMMU_EVENT_DESC(mem_pass_excl,           "csource=0x03"),
	AMD_IOMMU_EVENT_DESC(mem_target_abort,        "csource=0x04"),
	AMD_IOMMU_EVENT_DESC(mem_trans_total,         "csource=0x05"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit,   "csource=0x06"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis,   "csource=0x07"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit,   "csource=0x08"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis,   "csource=0x09"),
	AMD_IOMMU_EVENT_DESC(mem_dte_hit,             "csource=0x0a"),
	AMD_IOMMU_EVENT_DESC(mem_dte_mis,             "csource=0x0b"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_tot,       "csource=0x0c"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_nst,       "csource=0x0d"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_gst,       "csource=0x0e"),
	AMD_IOMMU_EVENT_DESC(int_dte_hit,             "csource=0x0f"),
	AMD_IOMMU_EVENT_DESC(int_dte_mis,             "csource=0x10"),
	AMD_IOMMU_EVENT_DESC(cmd_processed,           "csource=0x11"),
	AMD_IOMMU_EVENT_DESC(cmd_processed_inv,       "csource=0x12"),
	AMD_IOMMU_EVENT_DESC(tlb_inv,                 "csource=0x13"),
	AMD_IOMMU_EVENT_DESC(ign_rd_wr_mmio_1ff8h,    "csource=0x14"),
	AMD_IOMMU_EVENT_DESC(vapic_int_non_guest,     "csource=0x15"),
	AMD_IOMMU_EVENT_DESC(vapic_int_guest,         "csource=0x16"),
	AMD_IOMMU_EVENT_DESC(smi_recv,                "csource=0x17"),
	AMD_IOMMU_EVENT_DESC(smi_blk,                 "csource=0x18"),
	{ /* end: all zeroes */ },
};

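/*
 * Usage sketch (assuming the first IOMMU registered as "amd_iommu_0"):
 * the events above appear under
 * /sys/bus/event_source/devices/amd_iommu_0/events/ and can be counted
 * system-wide with the perf tool, optionally filtered via the format
 * attributes above, e.g.:
 *
 *   perf stat -e amd_iommu_0/mem_trans_total/ -a -- sleep 1
 *   perf stat -e amd_iommu_0/csource=0x05,devid=0x0100,devid_mask=0xffff/ -a -- sleep 1
 */
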
/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
static cpumask_t iommu_cpumask;

static ssize_t _iommu_cpumask_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);

static struct attribute *iommu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
	.attrs = iommu_cpumask_attrs,
};

/*---------------------------------------------*/

static int get_next_avail_iommu_bnk_cntr(struct perf_event *event)
{
	struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu);
	int max_cntrs = piommu->max_counters;
	int max_banks = piommu->max_banks;
	u32 shift, bank, cntr;
	unsigned long flags;
	int retval;

	raw_spin_lock_irqsave(&piommu->lock, flags);

	for (bank = 0, shift = 0; bank < max_banks; bank++) {
		for (cntr = 0; cntr < max_cntrs; cntr++) {
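			/* Each bank owns 4 bits of the mask: shift = (bank * 4) + cntr */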
			shift = bank + (bank * 3) + cntr;
			if (piommu->cntr_assign_mask & BIT_ULL(shift))
				continue;

			piommu->cntr_assign_mask |= BIT_ULL(shift);
			event->hw.iommu_bank = bank;
			event->hw.iommu_cntr = cntr;
			retval = 0;
			goto out;
		}
	}
	retval = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&piommu->lock, flags);
	return retval;
}

static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
				      u8 bank, u8 cntr)
{
	unsigned long flags;
	int max_banks, max_cntrs;
	int shift;

	max_banks = perf_iommu->max_banks;
	max_cntrs = perf_iommu->max_counters;

	/* bank and cntr are zero-based indices */
	if ((bank >= max_banks) || (cntr >= max_cntrs))
		return -EINVAL;

	shift = bank + (bank * 3) + cntr;

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);
	perf_iommu->cntr_assign_mask &= ~BIT_ULL(shift);
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

	return 0;
}

static int perf_iommu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Check the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * IOMMU counters are shared across all cores, so they support
	 * neither per-task (per-process) counting nor event sampling.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* IOMMU counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	/* Update the hw_perf_event struct with the iommu config data */
	hwc->conf  = event->attr.config;
	hwc->conf1 = event->attr.config1;

	return 0;
}

static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
{
	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
}

static void perf_iommu_enable_event(struct perf_event *ev)
{
	struct amd_iommu *iommu = perf_event_2_iommu(ev);
	struct hw_perf_event *hwc = &ev->hw;
	u8 bank = hwc->iommu_bank;
	u8 cntr = hwc->iommu_cntr;
	u64 reg = 0ULL;

	reg = GET_CSOURCE(hwc);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);

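	/*
	 * Each match register takes the match value in its low bits and
	 * the mask in bits 63:32. BIT(31) is set whenever a non-zero
	 * value/mask pair is supplied (assumed to be the match-enable bit).
	 */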
	reg = GET_DEVID_MASK(hwc);
	reg = GET_DEVID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, &reg);

	reg = GET_PASID_MASK(hwc);
	reg = GET_PASID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, &reg);

	reg = GET_DOMID_MASK(hwc);
	reg = GET_DOMID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, &reg);
}

static void perf_iommu_disable_event(struct perf_event *event)
{
	struct amd_iommu *iommu = perf_event_2_iommu(event);
	struct hw_perf_event *hwc = &event->hw;
	u64 reg = 0ULL;

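	/* Writing a counter source of zero stops the counter */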
	amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
			     IOMMU_PC_COUNTER_SRC_REG, &reg);
}

static void perf_iommu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

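	/* On PERF_EF_RELOAD, re-seed the hardware counter from the last snapshot */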
	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hwc->prev_count);
		struct amd_iommu *iommu = perf_event_2_iommu(event);

		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
	}

	perf_iommu_enable_event(event);
	perf_event_update_userpage(event);
}

static void perf_iommu_read(struct perf_event *event)
{
	u64 count, prev, delta;
	struct hw_perf_event *hwc = &event->hw;
	struct amd_iommu *iommu = perf_event_2_iommu(event);

	if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				 IOMMU_PC_COUNTER_REG, &count))
		return;

	/* The IOMMU pc counter register is only 48 bits */
	count &= GENMASK_ULL(47, 0);

	prev = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
		return;

	/*
	 * Handle 48-bit counter overflow: shift both values into the top
	 * 16 bits so a wrapped counter still produces a correct
	 * two's-complement delta, then shift the delta back down.
	 */
	delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_iommu_add(struct perf_event *event, int flags)
{
	int retval;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* Request an iommu bank/counter */
	retval = get_next_avail_iommu_bnk_cntr(event);
	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		perf_iommu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_iommu_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_amd_iommu *perf_iommu =
			container_of(event->pmu, struct perf_amd_iommu, pmu);

	perf_iommu_stop(event, PERF_EF_UPDATE);

	/* Clear the assigned iommu bank/counter */
	clear_avail_iommu_bnk_cntr(perf_iommu,
				   hwc->iommu_bank, hwc->iommu_cntr);

	perf_event_update_userpage(event);
}

static __init int _init_events_attrs(void)
{
	int i = 0, j;
	struct attribute **attrs;

	while (amd_iommu_v2_event_descs[i].attr.attr.name)
		i++;

	attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	for (j = 0; j < i; j++)
		attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

	amd_iommu_events_group.attrs = attrs;
	return 0;
}

const struct attribute_group *amd_iommu_attr_groups[] = {
	&amd_iommu_format_group,
	&amd_iommu_cpumask_group,
	&amd_iommu_events_group,
	NULL,
};

static struct pmu iommu_pmu = {
	.event_init	= perf_iommu_event_init,
	.add		= perf_iommu_add,
	.del		= perf_iommu_del,
	.start		= perf_iommu_start,
	.stop		= perf_iommu_stop,
	.read		= perf_iommu_read,
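	/* System-wide counting only; per-task events are rejected in event_init() */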
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_iommu_attr_groups,
};

static __init int init_one_iommu(unsigned int idx)
{
	struct perf_amd_iommu *perf_iommu;
	int ret;

	perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
	if (!perf_iommu)
		return -ENOMEM;

	raw_spin_lock_init(&perf_iommu->lock);

	perf_iommu->pmu          = iommu_pmu;
	perf_iommu->iommu        = get_amd_iommu(idx);
	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);

	if (!perf_iommu->iommu ||
	    !perf_iommu->max_banks ||
	    !perf_iommu->max_counters) {
		kfree(perf_iommu);
		return -EINVAL;
	}

	snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);

	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
	if (!ret) {
		pr_info("Detected AMD IOMMU #%u (%d banks, %d counters/bank).\n",
			idx, perf_iommu->max_banks, perf_iommu->max_counters);
		list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
	} else {
		pr_warn("Error initializing IOMMU %u.\n", idx);
		kfree(perf_iommu);
	}
	return ret;
}

static __init int amd_iommu_pc_init(void)
{
	unsigned int i, cnt = 0;
	int ret;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_supported())
		return -ENODEV;

	ret = _init_events_attrs();
	if (ret)
		return ret;

	/*
	 * Each IOMMU PMU is specific to one IOMMU and can function
	 * independently, so go through all IOMMUs and ignore the ones
	 * that fail to initialize, unless all of them fail.
	 */
	for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
		ret = init_one_iommu(i);
		if (!ret)
			cnt++;
	}

	if (!cnt) {
		kfree(amd_iommu_events_group.attrs);
		return -ENODEV;
	}

	/* Init the cpumask attribute to CPU 0 only */
	cpumask_set_cpu(0, &iommu_cpumask);
	return 0;
}

device_initcall(amd_iommu_pc_init);