xref: /linux/drivers/virt/acrn/hsm.c (revision 164666fa66669d437bdcc8d5f1744a2aee73be41)
// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN Hypervisor Service Module (HSM)
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Fengwei Yin <fengwei.yin@intel.com>
 *	Yakui Zhao <yakui.zhao@intel.com>
 */

#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/acrn.h>
#include <asm/hypervisor.h>

#include "acrn_drv.h"

/*
 * When /dev/acrn_hsm is opened, a 'struct acrn_vm' object is created to
 * represent a VM instance and continues to be associated with the opened file
 * descriptor. All ioctl operations on this file descriptor will be targeted to
 * the VM instance. Release of this file descriptor will destroy the object.
 */
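/*
 * A minimal userspace sketch of that lifecycle (illustrative only, not part
 * of this driver; it assumes the UAPI definitions exported through
 * <linux/acrn.h>):
 *
 *	int fd = open("/dev/acrn_hsm", O_RDWR);
 *	struct acrn_vm_creation create = { ... };
 *
 *	ioctl(fd, ACRN_IOCTL_CREATE_VM, &create);
 *	ioctl(fd, ACRN_IOCTL_START_VM);
 *	...
 *	close(fd);		// releases the fd and destroys the VM object
 */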
static int acrn_dev_open(struct inode *inode, struct file *filp)
{
	struct acrn_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	vm->vmid = ACRN_INVALID_VMID;
	filp->private_data = vm;
	return 0;
}

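/*
 * Handle the ACRN_PMCMD_* sub-commands of ACRN_IOCTL_PM_GET_CPU_STATE:
 * allocate a kernel buffer of the size the command expects, let the
 * hypervisor fill it via hcall_get_cpu_state(), then copy the result back
 * to userspace and free the buffer.
 */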
static int pmcmd_ioctl(u64 cmd, void __user *uptr)
{
	struct acrn_pstate_data *px_data;
	struct acrn_cstate_data *cx_data;
	u64 *pm_info;
	int ret = 0;

	switch (cmd & PMCMD_TYPE_MASK) {
	case ACRN_PMCMD_GET_PX_CNT:
	case ACRN_PMCMD_GET_CX_CNT:
		pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
		if (!pm_info)
			return -ENOMEM;

		ret = hcall_get_cpu_state(cmd, virt_to_phys(pm_info));
		if (ret < 0) {
			kfree(pm_info);
			break;
		}

		if (copy_to_user(uptr, pm_info, sizeof(u64)))
			ret = -EFAULT;
		kfree(pm_info);
		break;
	case ACRN_PMCMD_GET_PX_DATA:
		px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
		if (!px_data)
			return -ENOMEM;

		ret = hcall_get_cpu_state(cmd, virt_to_phys(px_data));
		if (ret < 0) {
			kfree(px_data);
			break;
		}

		if (copy_to_user(uptr, px_data, sizeof(*px_data)))
			ret = -EFAULT;
		kfree(px_data);
		break;
	case ACRN_PMCMD_GET_CX_DATA:
		cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
		if (!cx_data)
			return -ENOMEM;

		ret = hcall_get_cpu_state(cmd, virt_to_phys(cx_data));
		if (ret < 0) {
			kfree(cx_data);
			break;
		}

		if (copy_to_user(uptr, cx_data, sizeof(*cx_data)))
			ret = -EFAULT;
		kfree(cx_data);
		break;
	default:
		break;
	}

	return ret;
}

/*
 * HSM relies on the hypercall layer of the ACRN hypervisor to sanity-check
 * the input parameters of these ioctls.
 */
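/*
 * Most handlers below follow the same pattern: duplicate the userspace
 * buffer with memdup_user(), pass its physical address to the hypervisor
 * through a hypercall, then free the kernel copy whether or not the
 * hypercall succeeded.
 */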
static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long ioctl_param)
{
	struct acrn_vm *vm = filp->private_data;
	struct acrn_vm_creation *vm_param;
	struct acrn_vcpu_regs *cpu_regs;
	struct acrn_ioreq_notify notify;
	struct acrn_ptdev_irq *irq_info;
	struct acrn_ioeventfd ioeventfd;
	struct acrn_vm_memmap memmap;
	struct acrn_mmiodev *mmiodev;
	struct acrn_msi_entry *msi;
	struct acrn_pcidev *pcidev;
	struct acrn_irqfd irqfd;
	struct acrn_vdev *vdev;
	struct page *page;
	u64 cstate_cmd;
	int i, ret = 0;

	if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {
		dev_dbg(acrn_dev.this_device,
			"ioctl 0x%x: Invalid VM state!\n", cmd);
		return -EINVAL;
	}

	switch (cmd) {
	case ACRN_IOCTL_CREATE_VM:
		vm_param = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_vm_creation));
		if (IS_ERR(vm_param))
			return PTR_ERR(vm_param);

		/* Reserved fields must be zero; don't leak the memdup_user() buffer. */
		if ((vm_param->reserved0 | vm_param->reserved1) != 0) {
			kfree(vm_param);
			return -EINVAL;
		}

		vm = acrn_vm_create(vm, vm_param);
		if (!vm) {
			ret = -EINVAL;
			kfree(vm_param);
			break;
		}

		if (copy_to_user((void __user *)ioctl_param, vm_param,
				 sizeof(struct acrn_vm_creation))) {
			acrn_vm_destroy(vm);
			ret = -EFAULT;
		}

		kfree(vm_param);
		break;
	case ACRN_IOCTL_START_VM:
		ret = hcall_start_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to start VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_PAUSE_VM:
		ret = hcall_pause_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to pause VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_RESET_VM:
		ret = hcall_reset_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to restart VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_DESTROY_VM:
		ret = acrn_vm_destroy(vm);
		break;
	case ACRN_IOCTL_SET_VCPU_REGS:
		cpu_regs = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_vcpu_regs));
		if (IS_ERR(cpu_regs))
			return PTR_ERR(cpu_regs);

		/* All reserved fields must be zero; free cpu_regs before failing. */
		for (i = 0; i < ARRAY_SIZE(cpu_regs->reserved); i++)
			if (cpu_regs->reserved[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_32); i++)
			if (cpu_regs->vcpu_regs.reserved_32[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_64); i++)
			if (cpu_regs->vcpu_regs.reserved_64[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.gdt.reserved); i++)
			if (cpu_regs->vcpu_regs.gdt.reserved[i] |
			    cpu_regs->vcpu_regs.idt.reserved[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to set regs state of VM%u!\n",
				vm->vmid);
		kfree(cpu_regs);
		break;
	case ACRN_IOCTL_SET_MEMSEG:
		if (copy_from_user(&memmap, (void __user *)ioctl_param,
				   sizeof(memmap)))
			return -EFAULT;

		ret = acrn_vm_memseg_map(vm, &memmap);
		break;
	case ACRN_IOCTL_UNSET_MEMSEG:
		if (copy_from_user(&memmap, (void __user *)ioctl_param,
				   sizeof(memmap)))
			return -EFAULT;

		ret = acrn_vm_memseg_unmap(vm, &memmap);
		break;
	case ACRN_IOCTL_ASSIGN_MMIODEV:
		mmiodev = memdup_user((void __user *)ioctl_param,
				      sizeof(struct acrn_mmiodev));
		if (IS_ERR(mmiodev))
			return PTR_ERR(mmiodev);

		ret = hcall_assign_mmiodev(vm->vmid, virt_to_phys(mmiodev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to assign MMIO device!\n");
		kfree(mmiodev);
		break;
	case ACRN_IOCTL_DEASSIGN_MMIODEV:
		mmiodev = memdup_user((void __user *)ioctl_param,
				      sizeof(struct acrn_mmiodev));
		if (IS_ERR(mmiodev))
			return PTR_ERR(mmiodev);

		ret = hcall_deassign_mmiodev(vm->vmid, virt_to_phys(mmiodev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to deassign MMIO device!\n");
		kfree(mmiodev);
		break;
	case ACRN_IOCTL_ASSIGN_PCIDEV:
		pcidev = memdup_user((void __user *)ioctl_param,
				     sizeof(struct acrn_pcidev));
		if (IS_ERR(pcidev))
			return PTR_ERR(pcidev);

		ret = hcall_assign_pcidev(vm->vmid, virt_to_phys(pcidev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to assign pci device!\n");
		kfree(pcidev);
		break;
	case ACRN_IOCTL_DEASSIGN_PCIDEV:
		pcidev = memdup_user((void __user *)ioctl_param,
				     sizeof(struct acrn_pcidev));
		if (IS_ERR(pcidev))
			return PTR_ERR(pcidev);

		ret = hcall_deassign_pcidev(vm->vmid, virt_to_phys(pcidev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to deassign pci device!\n");
		kfree(pcidev);
		break;
	case ACRN_IOCTL_CREATE_VDEV:
		vdev = memdup_user((void __user *)ioctl_param,
				   sizeof(struct acrn_vdev));
		if (IS_ERR(vdev))
			return PTR_ERR(vdev);

		ret = hcall_create_vdev(vm->vmid, virt_to_phys(vdev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to create virtual device!\n");
		kfree(vdev);
		break;
	case ACRN_IOCTL_DESTROY_VDEV:
		vdev = memdup_user((void __user *)ioctl_param,
				   sizeof(struct acrn_vdev));
		if (IS_ERR(vdev))
			return PTR_ERR(vdev);
		ret = hcall_destroy_vdev(vm->vmid, virt_to_phys(vdev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to destroy virtual device!\n");
		kfree(vdev);
		break;
	case ACRN_IOCTL_SET_PTDEV_INTR:
		irq_info = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_ptdev_irq));
		if (IS_ERR(irq_info))
			return PTR_ERR(irq_info);

		ret = hcall_set_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to configure intr for ptdev!\n");
		kfree(irq_info);
		break;
	case ACRN_IOCTL_RESET_PTDEV_INTR:
		irq_info = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_ptdev_irq));
		if (IS_ERR(irq_info))
			return PTR_ERR(irq_info);

		ret = hcall_reset_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to reset intr for ptdev!\n");
		kfree(irq_info);
		break;
	case ACRN_IOCTL_SET_IRQLINE:
		ret = hcall_set_irqline(vm->vmid, ioctl_param);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to set interrupt line!\n");
		break;
	case ACRN_IOCTL_INJECT_MSI:
		msi = memdup_user((void __user *)ioctl_param,
				  sizeof(struct acrn_msi_entry));
		if (IS_ERR(msi))
			return PTR_ERR(msi);

		ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to inject MSI!\n");
		kfree(msi);
		break;
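	/*
	 * Pin one page of the caller's buffer and hand its physical address
	 * to the hypervisor for interrupt monitoring; a previously pinned
	 * monitor page, if any, is unpinned and replaced.
	 */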
	case ACRN_IOCTL_VM_INTR_MONITOR:
		ret = pin_user_pages_fast(ioctl_param, 1,
					  FOLL_WRITE | FOLL_LONGTERM, &page);
		if (unlikely(ret != 1)) {
			dev_dbg(acrn_dev.this_device,
				"Failed to pin intr hdr buffer!\n");
			return -EFAULT;
		}

		ret = hcall_vm_intr_monitor(vm->vmid, page_to_phys(page));
		if (ret < 0) {
			unpin_user_page(page);
			dev_dbg(acrn_dev.this_device,
				"Failed to monitor intr data!\n");
			return ret;
		}
		if (vm->monitor_page)
			unpin_user_page(vm->monitor_page);
		vm->monitor_page = page;
		break;
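	/* Each VM has at most one default I/O request client ("acrndm"). */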
	case ACRN_IOCTL_CREATE_IOREQ_CLIENT:
		if (vm->default_client)
			return -EEXIST;
		if (!acrn_ioreq_client_create(vm, NULL, NULL, true, "acrndm"))
			ret = -EINVAL;
		break;
	case ACRN_IOCTL_DESTROY_IOREQ_CLIENT:
		if (vm->default_client)
			acrn_ioreq_client_destroy(vm->default_client);
		break;
	case ACRN_IOCTL_ATTACH_IOREQ_CLIENT:
		if (vm->default_client)
			ret = acrn_ioreq_client_wait(vm->default_client);
		else
			ret = -ENODEV;
		break;
	case ACRN_IOCTL_NOTIFY_REQUEST_FINISH:
		if (copy_from_user(&notify, (void __user *)ioctl_param,
				   sizeof(struct acrn_ioreq_notify)))
			return -EFAULT;

		if (notify.reserved != 0)
			return -EINVAL;

		ret = acrn_ioreq_request_default_complete(vm, notify.vcpu);
		break;
	case ACRN_IOCTL_CLEAR_VM_IOREQ:
		acrn_ioreq_request_clear(vm);
		break;
	case ACRN_IOCTL_PM_GET_CPU_STATE:
		if (copy_from_user(&cstate_cmd, (void __user *)ioctl_param,
				   sizeof(cstate_cmd)))
			return -EFAULT;

		ret = pmcmd_ioctl(cstate_cmd, (void __user *)ioctl_param);
		break;
	case ACRN_IOCTL_IOEVENTFD:
		if (copy_from_user(&ioeventfd, (void __user *)ioctl_param,
				   sizeof(ioeventfd)))
			return -EFAULT;

		if (ioeventfd.reserved != 0)
			return -EINVAL;

		ret = acrn_ioeventfd_config(vm, &ioeventfd);
		break;
	case ACRN_IOCTL_IRQFD:
		if (copy_from_user(&irqfd, (void __user *)ioctl_param,
				   sizeof(irqfd)))
			return -EFAULT;
		ret = acrn_irqfd_config(vm, &irqfd);
		break;
	default:
		dev_dbg(acrn_dev.this_device, "Unknown IOCTL 0x%x!\n", cmd);
		ret = -ENOTTY;
	}

	return ret;
}

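/*
 * Destroy the VM instance bound to the file descriptor and free the
 * 'struct acrn_vm' allocated in acrn_dev_open().
 */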
static int acrn_dev_release(struct inode *inode, struct file *filp)
{
	struct acrn_vm *vm = filp->private_data;

	acrn_vm_destroy(vm);
	kfree(vm);
	return 0;
}

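/*
 * sysfs 'remove_cpu' write handler: offline the given Service VM CPU (CPU0
 * is never removable) and then ask the hypervisor to withdraw it via
 * hcall_sos_remove_cpu(). On hypercall failure the CPU is added back online.
 */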
static ssize_t remove_cpu_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u64 cpu, lapicid;
	int ret;

	if (kstrtoull(buf, 0, &cpu) < 0)
		return -EINVAL;

	if (cpu >= num_possible_cpus() || cpu == 0 || !cpu_is_hotpluggable(cpu))
		return -EINVAL;

	if (cpu_online(cpu))
		remove_cpu(cpu);

	lapicid = cpu_data(cpu).apicid;
	dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
	ret = hcall_sos_remove_cpu(lapicid);
	if (ret < 0) {
		dev_err(dev, "Failed to remove cpu %lld!\n", cpu);
		goto fail_remove;
	}

	return count;

fail_remove:
	add_cpu(cpu);
	return ret;
}
static DEVICE_ATTR_WO(remove_cpu);

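/* Expose the 'remove_cpu' attribute only when CPU hotplug is configured. */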
static umode_t acrn_attr_visible(struct kobject *kobj, struct attribute *a, int n)
{
	if (a == &dev_attr_remove_cpu.attr)
		return IS_ENABLED(CONFIG_HOTPLUG_CPU) ? a->mode : 0;

	return a->mode;
}

static struct attribute *acrn_attrs[] = {
	&dev_attr_remove_cpu.attr,
	NULL
};

static struct attribute_group acrn_attr_group = {
	.attrs = acrn_attrs,
	.is_visible = acrn_attr_visible,
};

static const struct attribute_group *acrn_attr_groups[] = {
	&acrn_attr_group,
	NULL
};

static const struct file_operations acrn_fops = {
	.owner		= THIS_MODULE,
	.open		= acrn_dev_open,
	.release	= acrn_dev_release,
	.unlocked_ioctl = acrn_dev_ioctl,
};

struct miscdevice acrn_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "acrn_hsm",
	.fops	= &acrn_fops,
	.groups	= acrn_attr_groups,
};

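/*
 * Register the /dev/acrn_hsm misc device and set up the I/O request
 * interrupt handler. The module only loads when running on top of the ACRN
 * hypervisor and only in the privileged (Service) VM.
 */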
static int __init hsm_init(void)
{
	int ret;

	if (x86_hyper_type != X86_HYPER_ACRN)
		return -ENODEV;

	if (!(cpuid_eax(ACRN_CPUID_FEATURES) & ACRN_FEATURE_PRIVILEGED_VM))
		return -EPERM;

	ret = misc_register(&acrn_dev);
	if (ret) {
		pr_err("Create misc dev failed!\n");
		return ret;
	}

	ret = acrn_ioreq_intr_setup();
	if (ret) {
		pr_err("Setup I/O request handler failed!\n");
		misc_deregister(&acrn_dev);
		return ret;
	}
	return 0;
}

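/* Tear down the I/O request interrupt handler and unregister the misc device. */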
static void __exit hsm_exit(void)
{
	acrn_ioreq_intr_remove();
	misc_deregister(&acrn_dev);
}
module_init(hsm_init);
module_exit(hsm_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ACRN Hypervisor Service Module (HSM)");