xref: /linux/drivers/s390/crypto/vfio_ap_ops.c (revision d2912cb15bdda8ba4a5dd73396ad62641af2f520)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Adjunct processor matrix VFIO device driver callbacks.
4  *
5  * Copyright IBM Corp. 2018
6  *
7  * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
8  *	      Halil Pasic <pasic@linux.ibm.com>
9  *	      Pierre Morel <pmorel@linux.ibm.com>
10  */
11 #include <linux/string.h>
12 #include <linux/vfio.h>
13 #include <linux/device.h>
14 #include <linux/list.h>
15 #include <linux/ctype.h>
16 #include <linux/bitops.h>
17 #include <linux/kvm_host.h>
18 #include <linux/module.h>
19 #include <asm/kvm.h>
20 #include <asm/zcrypt.h>
21 
22 #include "vfio_ap_private.h"
23 
24 #define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
25 #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
26 
27 static void vfio_ap_matrix_init(struct ap_config_info *info,
28 				struct ap_matrix *matrix)
29 {
30 	matrix->apm_max = info->apxa ? info->Na : 63;
31 	matrix->aqm_max = info->apxa ? info->Nd : 15;
32 	matrix->adm_max = info->apxa ? info->Nd : 15;
33 }
34 
35 static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
36 {
37 	struct ap_matrix_mdev *matrix_mdev;
38 
39 	if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
40 		return -EPERM;
41 
42 	matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
43 	if (!matrix_mdev) {
44 		atomic_inc(&matrix_dev->available_instances);
45 		return -ENOMEM;
46 	}
47 
48 	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
49 	mdev_set_drvdata(mdev, matrix_mdev);
50 	mutex_lock(&matrix_dev->lock);
51 	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
52 	mutex_unlock(&matrix_dev->lock);
53 
54 	return 0;
55 }
56 
57 static int vfio_ap_mdev_remove(struct mdev_device *mdev)
58 {
59 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
60 
61 	if (matrix_mdev->kvm)
62 		return -EBUSY;
63 
64 	mutex_lock(&matrix_dev->lock);
65 	list_del(&matrix_mdev->node);
66 	mutex_unlock(&matrix_dev->lock);
67 
68 	kfree(matrix_mdev);
69 	mdev_set_drvdata(mdev, NULL);
70 	atomic_inc(&matrix_dev->available_instances);
71 
72 	return 0;
73 }
74 
75 static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
76 {
77 	return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
78 }
79 
80 static MDEV_TYPE_ATTR_RO(name);
81 
82 static ssize_t available_instances_show(struct kobject *kobj,
83 					struct device *dev, char *buf)
84 {
85 	return sprintf(buf, "%d\n",
86 		       atomic_read(&matrix_dev->available_instances));
87 }
88 
89 static MDEV_TYPE_ATTR_RO(available_instances);
90 
91 static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
92 			       char *buf)
93 {
94 	return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
95 }
96 
97 static MDEV_TYPE_ATTR_RO(device_api);
98 
/* Attributes shown under the supported-type sysfs directory. */
static struct attribute *vfio_ap_mdev_type_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

/* The single supported mdev type: full AP passthrough. */
static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
	.name = VFIO_AP_MDEV_TYPE_HWVIRT,
	.attrs = vfio_ap_mdev_type_attrs,
};

/* NULL-terminated list of supported type groups for mdev registration. */
static struct attribute_group *vfio_ap_mdev_type_groups[] = {
	&vfio_ap_mdev_hwvirt_type_group,
	NULL,
};
115 
/*
 * Matching context handed to vfio_ap_has_queue() while iterating over the
 * queue devices bound to the vfio_ap driver.
 */
struct vfio_ap_queue_reserved {
	unsigned long *apid;	/* adapter ID to match, or NULL */
	unsigned long *apqi;	/* queue index to match, or NULL */
	bool reserved;		/* set true when a matching queue is found */
};
121 
122 /**
123  * vfio_ap_has_queue
124  *
125  * @dev: an AP queue device
126  * @data: a struct vfio_ap_queue_reserved reference
127  *
128  * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
129  * apid or apqi specified in @data:
130  *
131  * - If @data contains both an apid and apqi value, then @data will be flagged
132  *   as reserved if the APID and APQI fields for the AP queue device matches
133  *
134  * - If @data contains only an apid value, @data will be flagged as
135  *   reserved if the APID field in the AP queue device matches
136  *
137  * - If @data contains only an apqi value, @data will be flagged as
138  *   reserved if the APQI field in the AP queue device matches
139  *
140  * Returns 0 to indicate the input to function succeeded. Returns -EINVAL if
141  * @data does not contain either an apid or apqi.
142  */
143 static int vfio_ap_has_queue(struct device *dev, void *data)
144 {
145 	struct vfio_ap_queue_reserved *qres = data;
146 	struct ap_queue *ap_queue = to_ap_queue(dev);
147 	ap_qid_t qid;
148 	unsigned long id;
149 
150 	if (qres->apid && qres->apqi) {
151 		qid = AP_MKQID(*qres->apid, *qres->apqi);
152 		if (qid == ap_queue->qid)
153 			qres->reserved = true;
154 	} else if (qres->apid && !qres->apqi) {
155 		id = AP_QID_CARD(ap_queue->qid);
156 		if (id == *qres->apid)
157 			qres->reserved = true;
158 	} else if (!qres->apid && qres->apqi) {
159 		id = AP_QID_QUEUE(ap_queue->qid);
160 		if (id == *qres->apqi)
161 			qres->reserved = true;
162 	} else {
163 		return -EINVAL;
164 	}
165 
166 	return 0;
167 }
168 
169 /**
170  * vfio_ap_verify_queue_reserved
171  *
172  * @matrix_dev: a mediated matrix device
173  * @apid: an AP adapter ID
174  * @apqi: an AP queue index
175  *
176  * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
177  * driver according to the following rules:
178  *
179  * - If both @apid and @apqi are not NULL, then there must be an AP queue
180  *   device bound to the vfio_ap driver with the APQN identified by @apid and
181  *   @apqi
182  *
183  * - If only @apid is not NULL, then there must be an AP queue device bound
184  *   to the vfio_ap driver with an APQN containing @apid
185  *
186  * - If only @apqi is not NULL, then there must be an AP queue device bound
187  *   to the vfio_ap driver with an APQN containing @apqi
188  *
189  * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
190  */
191 static int vfio_ap_verify_queue_reserved(unsigned long *apid,
192 					 unsigned long *apqi)
193 {
194 	int ret;
195 	struct vfio_ap_queue_reserved qres;
196 
197 	qres.apid = apid;
198 	qres.apqi = apqi;
199 	qres.reserved = false;
200 
201 	ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
202 				     &qres, vfio_ap_has_queue);
203 	if (ret)
204 		return ret;
205 
206 	if (qres.reserved)
207 		return 0;
208 
209 	return -EADDRNOTAVAIL;
210 }
211 
212 static int
213 vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
214 					     unsigned long apid)
215 {
216 	int ret;
217 	unsigned long apqi;
218 	unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
219 
220 	if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
221 		return vfio_ap_verify_queue_reserved(&apid, NULL);
222 
223 	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
224 		ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
225 		if (ret)
226 			return ret;
227 	}
228 
229 	return 0;
230 }
231 
232 /**
233  * vfio_ap_mdev_verify_no_sharing
234  *
235  * Verifies that the APQNs derived from the cross product of the AP adapter IDs
236  * and AP queue indexes comprising the AP matrix are not configured for another
237  * mediated device. AP queue sharing is not allowed.
238  *
239  * @matrix_mdev: the mediated matrix device
240  *
241  * Returns 0 if the APQNs are not shared, otherwise; returns -EADDRINUSE.
242  */
243 static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
244 {
245 	struct ap_matrix_mdev *lstdev;
246 	DECLARE_BITMAP(apm, AP_DEVICES);
247 	DECLARE_BITMAP(aqm, AP_DOMAINS);
248 
249 	list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
250 		if (matrix_mdev == lstdev)
251 			continue;
252 
253 		memset(apm, 0, sizeof(apm));
254 		memset(aqm, 0, sizeof(aqm));
255 
256 		/*
257 		 * We work on full longs, as we can only exclude the leftover
258 		 * bits in non-inverse order. The leftover is all zeros.
259 		 */
260 		if (!bitmap_and(apm, matrix_mdev->matrix.apm,
261 				lstdev->matrix.apm, AP_DEVICES))
262 			continue;
263 
264 		if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
265 				lstdev->matrix.aqm, AP_DOMAINS))
266 			continue;
267 
268 		return -EADDRINUSE;
269 	}
270 
271 	return 0;
272 }
273 
274 /**
275  * assign_adapter_store
276  *
277  * @dev:	the matrix device
278  * @attr:	the mediated matrix device's assign_adapter attribute
279  * @buf:	a buffer containing the AP adapter number (APID) to
280  *		be assigned
281  * @count:	the number of bytes in @buf
282  *
283  * Parses the APID from @buf and sets the corresponding bit in the mediated
284  * matrix device's APM.
285  *
286  * Returns the number of bytes processed if the APID is valid; otherwise,
287  * returns one of the following errors:
288  *
289  *	1. -EINVAL
290  *	   The APID is not a valid number
291  *
292  *	2. -ENODEV
293  *	   The APID exceeds the maximum value configured for the system
294  *
295  *	3. -EADDRNOTAVAIL
296  *	   An APQN derived from the cross product of the APID being assigned
297  *	   and the APQIs previously assigned is not bound to the vfio_ap device
298  *	   driver; or, if no APQIs have yet been assigned, the APID is not
299  *	   contained in an APQN bound to the vfio_ap device driver.
300  *
301  *	4. -EADDRINUSE
302  *	   An APQN derived from the cross product of the APID being assigned
303  *	   and the APQIs previously assigned is being used by another mediated
304  *	   matrix device
305  */
306 static ssize_t assign_adapter_store(struct device *dev,
307 				    struct device_attribute *attr,
308 				    const char *buf, size_t count)
309 {
310 	int ret;
311 	unsigned long apid;
312 	struct mdev_device *mdev = mdev_from_dev(dev);
313 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
314 
315 	/* If the guest is running, disallow assignment of adapter */
316 	if (matrix_mdev->kvm)
317 		return -EBUSY;
318 
319 	ret = kstrtoul(buf, 0, &apid);
320 	if (ret)
321 		return ret;
322 
323 	if (apid > matrix_mdev->matrix.apm_max)
324 		return -ENODEV;
325 
326 	/*
327 	 * Set the bit in the AP mask (APM) corresponding to the AP adapter
328 	 * number (APID). The bits in the mask, from most significant to least
329 	 * significant bit, correspond to APIDs 0-255.
330 	 */
331 	mutex_lock(&matrix_dev->lock);
332 
333 	ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
334 	if (ret)
335 		goto done;
336 
337 	set_bit_inv(apid, matrix_mdev->matrix.apm);
338 
339 	ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
340 	if (ret)
341 		goto share_err;
342 
343 	ret = count;
344 	goto done;
345 
346 share_err:
347 	clear_bit_inv(apid, matrix_mdev->matrix.apm);
348 done:
349 	mutex_unlock(&matrix_dev->lock);
350 
351 	return ret;
352 }
353 static DEVICE_ATTR_WO(assign_adapter);
354 
355 /**
356  * unassign_adapter_store
357  *
358  * @dev:	the matrix device
359  * @attr:	the mediated matrix device's unassign_adapter attribute
360  * @buf:	a buffer containing the adapter number (APID) to be unassigned
361  * @count:	the number of bytes in @buf
362  *
363  * Parses the APID from @buf and clears the corresponding bit in the mediated
364  * matrix device's APM.
365  *
366  * Returns the number of bytes processed if the APID is valid; otherwise,
367  * returns one of the following errors:
368  *	-EINVAL if the APID is not a number
369  *	-ENODEV if the APID it exceeds the maximum value configured for the
370  *		system
371  */
372 static ssize_t unassign_adapter_store(struct device *dev,
373 				      struct device_attribute *attr,
374 				      const char *buf, size_t count)
375 {
376 	int ret;
377 	unsigned long apid;
378 	struct mdev_device *mdev = mdev_from_dev(dev);
379 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
380 
381 	/* If the guest is running, disallow un-assignment of adapter */
382 	if (matrix_mdev->kvm)
383 		return -EBUSY;
384 
385 	ret = kstrtoul(buf, 0, &apid);
386 	if (ret)
387 		return ret;
388 
389 	if (apid > matrix_mdev->matrix.apm_max)
390 		return -ENODEV;
391 
392 	mutex_lock(&matrix_dev->lock);
393 	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
394 	mutex_unlock(&matrix_dev->lock);
395 
396 	return count;
397 }
398 static DEVICE_ATTR_WO(unassign_adapter);
399 
400 static int
401 vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
402 					     unsigned long apqi)
403 {
404 	int ret;
405 	unsigned long apid;
406 	unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
407 
408 	if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
409 		return vfio_ap_verify_queue_reserved(NULL, &apqi);
410 
411 	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
412 		ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
413 		if (ret)
414 			return ret;
415 	}
416 
417 	return 0;
418 }
419 
420 /**
421  * assign_domain_store
422  *
423  * @dev:	the matrix device
424  * @attr:	the mediated matrix device's assign_domain attribute
425  * @buf:	a buffer containing the AP queue index (APQI) of the domain to
426  *		be assigned
427  * @count:	the number of bytes in @buf
428  *
429  * Parses the APQI from @buf and sets the corresponding bit in the mediated
430  * matrix device's AQM.
431  *
432  * Returns the number of bytes processed if the APQI is valid; otherwise returns
433  * one of the following errors:
434  *
435  *	1. -EINVAL
436  *	   The APQI is not a valid number
437  *
438  *	2. -ENODEV
439  *	   The APQI exceeds the maximum value configured for the system
440  *
441  *	3. -EADDRNOTAVAIL
442  *	   An APQN derived from the cross product of the APQI being assigned
443  *	   and the APIDs previously assigned is not bound to the vfio_ap device
444  *	   driver; or, if no APIDs have yet been assigned, the APQI is not
445  *	   contained in an APQN bound to the vfio_ap device driver.
446  *
447  *	4. -EADDRINUSE
448  *	   An APQN derived from the cross product of the APQI being assigned
449  *	   and the APIDs previously assigned is being used by another mediated
450  *	   matrix device
451  */
452 static ssize_t assign_domain_store(struct device *dev,
453 				   struct device_attribute *attr,
454 				   const char *buf, size_t count)
455 {
456 	int ret;
457 	unsigned long apqi;
458 	struct mdev_device *mdev = mdev_from_dev(dev);
459 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
460 	unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
461 
462 	/* If the guest is running, disallow assignment of domain */
463 	if (matrix_mdev->kvm)
464 		return -EBUSY;
465 
466 	ret = kstrtoul(buf, 0, &apqi);
467 	if (ret)
468 		return ret;
469 	if (apqi > max_apqi)
470 		return -ENODEV;
471 
472 	mutex_lock(&matrix_dev->lock);
473 
474 	ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
475 	if (ret)
476 		goto done;
477 
478 	set_bit_inv(apqi, matrix_mdev->matrix.aqm);
479 
480 	ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
481 	if (ret)
482 		goto share_err;
483 
484 	ret = count;
485 	goto done;
486 
487 share_err:
488 	clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
489 done:
490 	mutex_unlock(&matrix_dev->lock);
491 
492 	return ret;
493 }
494 static DEVICE_ATTR_WO(assign_domain);
495 
496 
497 /**
498  * unassign_domain_store
499  *
500  * @dev:	the matrix device
501  * @attr:	the mediated matrix device's unassign_domain attribute
502  * @buf:	a buffer containing the AP queue index (APQI) of the domain to
503  *		be unassigned
504  * @count:	the number of bytes in @buf
505  *
506  * Parses the APQI from @buf and clears the corresponding bit in the
507  * mediated matrix device's AQM.
508  *
509  * Returns the number of bytes processed if the APQI is valid; otherwise,
510  * returns one of the following errors:
511  *	-EINVAL if the APQI is not a number
512  *	-ENODEV if the APQI exceeds the maximum value configured for the system
513  */
514 static ssize_t unassign_domain_store(struct device *dev,
515 				     struct device_attribute *attr,
516 				     const char *buf, size_t count)
517 {
518 	int ret;
519 	unsigned long apqi;
520 	struct mdev_device *mdev = mdev_from_dev(dev);
521 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
522 
523 	/* If the guest is running, disallow un-assignment of domain */
524 	if (matrix_mdev->kvm)
525 		return -EBUSY;
526 
527 	ret = kstrtoul(buf, 0, &apqi);
528 	if (ret)
529 		return ret;
530 
531 	if (apqi > matrix_mdev->matrix.aqm_max)
532 		return -ENODEV;
533 
534 	mutex_lock(&matrix_dev->lock);
535 	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
536 	mutex_unlock(&matrix_dev->lock);
537 
538 	return count;
539 }
540 static DEVICE_ATTR_WO(unassign_domain);
541 
542 /**
543  * assign_control_domain_store
544  *
545  * @dev:	the matrix device
546  * @attr:	the mediated matrix device's assign_control_domain attribute
547  * @buf:	a buffer containing the domain ID to be assigned
548  * @count:	the number of bytes in @buf
549  *
550  * Parses the domain ID from @buf and sets the corresponding bit in the mediated
551  * matrix device's ADM.
552  *
553  * Returns the number of bytes processed if the domain ID is valid; otherwise,
554  * returns one of the following errors:
555  *	-EINVAL if the ID is not a number
556  *	-ENODEV if the ID exceeds the maximum value configured for the system
557  */
558 static ssize_t assign_control_domain_store(struct device *dev,
559 					   struct device_attribute *attr,
560 					   const char *buf, size_t count)
561 {
562 	int ret;
563 	unsigned long id;
564 	struct mdev_device *mdev = mdev_from_dev(dev);
565 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
566 
567 	/* If the guest is running, disallow assignment of control domain */
568 	if (matrix_mdev->kvm)
569 		return -EBUSY;
570 
571 	ret = kstrtoul(buf, 0, &id);
572 	if (ret)
573 		return ret;
574 
575 	if (id > matrix_mdev->matrix.adm_max)
576 		return -ENODEV;
577 
578 	/* Set the bit in the ADM (bitmask) corresponding to the AP control
579 	 * domain number (id). The bits in the mask, from most significant to
580 	 * least significant, correspond to IDs 0 up to the one less than the
581 	 * number of control domains that can be assigned.
582 	 */
583 	mutex_lock(&matrix_dev->lock);
584 	set_bit_inv(id, matrix_mdev->matrix.adm);
585 	mutex_unlock(&matrix_dev->lock);
586 
587 	return count;
588 }
589 static DEVICE_ATTR_WO(assign_control_domain);
590 
591 /**
592  * unassign_control_domain_store
593  *
594  * @dev:	the matrix device
595  * @attr:	the mediated matrix device's unassign_control_domain attribute
596  * @buf:	a buffer containing the domain ID to be unassigned
597  * @count:	the number of bytes in @buf
598  *
599  * Parses the domain ID from @buf and clears the corresponding bit in the
600  * mediated matrix device's ADM.
601  *
602  * Returns the number of bytes processed if the domain ID is valid; otherwise,
603  * returns one of the following errors:
604  *	-EINVAL if the ID is not a number
605  *	-ENODEV if the ID exceeds the maximum value configured for the system
606  */
607 static ssize_t unassign_control_domain_store(struct device *dev,
608 					     struct device_attribute *attr,
609 					     const char *buf, size_t count)
610 {
611 	int ret;
612 	unsigned long domid;
613 	struct mdev_device *mdev = mdev_from_dev(dev);
614 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
615 	unsigned long max_domid =  matrix_mdev->matrix.adm_max;
616 
617 	/* If the guest is running, disallow un-assignment of control domain */
618 	if (matrix_mdev->kvm)
619 		return -EBUSY;
620 
621 	ret = kstrtoul(buf, 0, &domid);
622 	if (ret)
623 		return ret;
624 	if (domid > max_domid)
625 		return -ENODEV;
626 
627 	mutex_lock(&matrix_dev->lock);
628 	clear_bit_inv(domid, matrix_mdev->matrix.adm);
629 	mutex_unlock(&matrix_dev->lock);
630 
631 	return count;
632 }
633 static DEVICE_ATTR_WO(unassign_control_domain);
634 
635 static ssize_t control_domains_show(struct device *dev,
636 				    struct device_attribute *dev_attr,
637 				    char *buf)
638 {
639 	unsigned long id;
640 	int nchars = 0;
641 	int n;
642 	char *bufpos = buf;
643 	struct mdev_device *mdev = mdev_from_dev(dev);
644 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
645 	unsigned long max_domid = matrix_mdev->matrix.adm_max;
646 
647 	mutex_lock(&matrix_dev->lock);
648 	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
649 		n = sprintf(bufpos, "%04lx\n", id);
650 		bufpos += n;
651 		nchars += n;
652 	}
653 	mutex_unlock(&matrix_dev->lock);
654 
655 	return nchars;
656 }
657 static DEVICE_ATTR_RO(control_domains);
658 
659 static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
660 			   char *buf)
661 {
662 	struct mdev_device *mdev = mdev_from_dev(dev);
663 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
664 	char *bufpos = buf;
665 	unsigned long apid;
666 	unsigned long apqi;
667 	unsigned long apid1;
668 	unsigned long apqi1;
669 	unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
670 	unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
671 	int nchars = 0;
672 	int n;
673 
674 	apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
675 	apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
676 
677 	mutex_lock(&matrix_dev->lock);
678 
679 	if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
680 		for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
681 			for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
682 					     naqm_bits) {
683 				n = sprintf(bufpos, "%02lx.%04lx\n", apid,
684 					    apqi);
685 				bufpos += n;
686 				nchars += n;
687 			}
688 		}
689 	} else if (apid1 < napm_bits) {
690 		for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
691 			n = sprintf(bufpos, "%02lx.\n", apid);
692 			bufpos += n;
693 			nchars += n;
694 		}
695 	} else if (apqi1 < naqm_bits) {
696 		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
697 			n = sprintf(bufpos, ".%04lx\n", apqi);
698 			bufpos += n;
699 			nchars += n;
700 		}
701 	}
702 
703 	mutex_unlock(&matrix_dev->lock);
704 
705 	return nchars;
706 }
707 static DEVICE_ATTR_RO(matrix);
708 
/* Per-mdev sysfs attributes for configuring and inspecting the matrix. */
static struct attribute *vfio_ap_mdev_attrs[] = {
	&dev_attr_assign_adapter.attr,
	&dev_attr_unassign_adapter.attr,
	&dev_attr_assign_domain.attr,
	&dev_attr_unassign_domain.attr,
	&dev_attr_assign_control_domain.attr,
	&dev_attr_unassign_control_domain.attr,
	&dev_attr_control_domains.attr,
	&dev_attr_matrix.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_attr_group = {
	.attrs = vfio_ap_mdev_attrs
};

/* NULL-terminated list of attribute groups passed to the mdev core. */
static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
	&vfio_ap_mdev_attr_group,
	NULL
};
729 
730 /**
731  * vfio_ap_mdev_set_kvm
732  *
733  * @matrix_mdev: a mediated matrix device
734  * @kvm: reference to KVM instance
735  *
736  * Verifies no other mediated matrix device has @kvm and sets a reference to
737  * it in @matrix_mdev->kvm.
738  *
739  * Return 0 if no other mediated matrix device has a reference to @kvm;
740  * otherwise, returns an -EPERM.
741  */
742 static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
743 				struct kvm *kvm)
744 {
745 	struct ap_matrix_mdev *m;
746 
747 	mutex_lock(&matrix_dev->lock);
748 
749 	list_for_each_entry(m, &matrix_dev->mdev_list, node) {
750 		if ((m != matrix_mdev) && (m->kvm == kvm)) {
751 			mutex_unlock(&matrix_dev->lock);
752 			return -EPERM;
753 		}
754 	}
755 
756 	matrix_mdev->kvm = kvm;
757 	mutex_unlock(&matrix_dev->lock);
758 
759 	return 0;
760 }
761 
762 static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
763 				       unsigned long action, void *data)
764 {
765 	int ret;
766 	struct ap_matrix_mdev *matrix_mdev;
767 
768 	if (action != VFIO_GROUP_NOTIFY_SET_KVM)
769 		return NOTIFY_OK;
770 
771 	matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
772 
773 	if (!data) {
774 		matrix_mdev->kvm = NULL;
775 		return NOTIFY_OK;
776 	}
777 
778 	ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
779 	if (ret)
780 		return NOTIFY_DONE;
781 
782 	/* If there is no CRYCB pointer, then we can't copy the masks */
783 	if (!matrix_mdev->kvm->arch.crypto.crycbd)
784 		return NOTIFY_DONE;
785 
786 	kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
787 				  matrix_mdev->matrix.aqm,
788 				  matrix_mdev->matrix.adm);
789 
790 	return NOTIFY_OK;
791 }
792 
793 static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
794 				    unsigned int retry)
795 {
796 	struct ap_queue_status status;
797 
798 	do {
799 		status = ap_zapq(AP_MKQID(apid, apqi));
800 		switch (status.response_code) {
801 		case AP_RESPONSE_NORMAL:
802 			return 0;
803 		case AP_RESPONSE_RESET_IN_PROGRESS:
804 		case AP_RESPONSE_BUSY:
805 			msleep(20);
806 			break;
807 		default:
808 			/* things are really broken, give up */
809 			return -EIO;
810 		}
811 	} while (retry--);
812 
813 	return -EBUSY;
814 }
815 
816 static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
817 {
818 	int ret;
819 	int rc = 0;
820 	unsigned long apid, apqi;
821 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
822 
823 	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
824 			     matrix_mdev->matrix.apm_max + 1) {
825 		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
826 				     matrix_mdev->matrix.aqm_max + 1) {
827 			ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
828 			/*
829 			 * Regardless whether a queue turns out to be busy, or
830 			 * is not operational, we need to continue resetting
831 			 * the remaining queues.
832 			 */
833 			if (ret)
834 				rc = ret;
835 		}
836 	}
837 
838 	return rc;
839 }
840 
841 static int vfio_ap_mdev_open(struct mdev_device *mdev)
842 {
843 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
844 	unsigned long events;
845 	int ret;
846 
847 
848 	if (!try_module_get(THIS_MODULE))
849 		return -ENODEV;
850 
851 	matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
852 	events = VFIO_GROUP_NOTIFY_SET_KVM;
853 
854 	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
855 				     &events, &matrix_mdev->group_notifier);
856 	if (ret) {
857 		module_put(THIS_MODULE);
858 		return ret;
859 	}
860 
861 	return 0;
862 }
863 
/*
 * Release callback: undo what vfio_ap_mdev_open() set up. The order matters:
 * the masks are removed from the guest's CRYCB before the queues are reset,
 * and the notifier is unregistered before the KVM reference is dropped.
 */
static void vfio_ap_mdev_release(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	/* If a guest was attached, clear this mdev's masks from its CRYCB. */
	if (matrix_mdev->kvm)
		kvm_arch_crypto_clear_masks(matrix_mdev->kvm);

	/* Reset all queues in the matrix before releasing them. */
	vfio_ap_mdev_reset_queues(mdev);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
				 &matrix_mdev->group_notifier);
	matrix_mdev->kvm = NULL;
	module_put(THIS_MODULE);
}
877 
878 static int vfio_ap_mdev_get_device_info(unsigned long arg)
879 {
880 	unsigned long minsz;
881 	struct vfio_device_info info;
882 
883 	minsz = offsetofend(struct vfio_device_info, num_irqs);
884 
885 	if (copy_from_user(&info, (void __user *)arg, minsz))
886 		return -EFAULT;
887 
888 	if (info.argsz < minsz)
889 		return -EINVAL;
890 
891 	info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
892 	info.num_regions = 0;
893 	info.num_irqs = 0;
894 
895 	return copy_to_user((void __user *)arg, &info, minsz);
896 }
897 
898 static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
899 				    unsigned int cmd, unsigned long arg)
900 {
901 	int ret;
902 
903 	switch (cmd) {
904 	case VFIO_DEVICE_GET_INFO:
905 		ret = vfio_ap_mdev_get_device_info(arg);
906 		break;
907 	case VFIO_DEVICE_RESET:
908 		ret = vfio_ap_mdev_reset_queues(mdev);
909 		break;
910 	default:
911 		ret = -EOPNOTSUPP;
912 		break;
913 	}
914 
915 	return ret;
916 }
917 
/* mdev parent callbacks registered for the matrix device. */
static const struct mdev_parent_ops vfio_ap_matrix_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= vfio_ap_mdev_type_groups,
	.mdev_attr_groups	= vfio_ap_mdev_attr_groups,
	.create			= vfio_ap_mdev_create,
	.remove			= vfio_ap_mdev_remove,
	.open			= vfio_ap_mdev_open,
	.release		= vfio_ap_mdev_release,
	.ioctl			= vfio_ap_mdev_ioctl,
};
928 
/*
 * Register the matrix device with the mdev framework after initializing the
 * number of mediated devices that may be created.
 */
int vfio_ap_mdev_register(void)
{
	atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);

	return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
}
935 
/* Unregister the matrix device from the mdev framework. */
void vfio_ap_mdev_unregister(void)
{
	mdev_unregister_device(&matrix_dev->device);
}
940