xref: /illumos-gate/usr/src/lib/libvmmapi/common/vmmapi.c (revision 4c2bdae20e15dfc656ce2c87808008f4da4fc3f0)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 /*
31  * This file and its contents are supplied under the terms of the
32  * Common Development and Distribution License ("CDDL"), version 1.0.
33  * You may only use this file in accordance with the terms of version
34  * 1.0 of the CDDL.
35  *
36  * A full copy of the text of the CDDL should have accompanied this
37  * source.  A copy of the CDDL is also available via the Internet at
38  * http://www.illumos.org/license/CDDL.
39  *
40  * Copyright 2015 Pluribus Networks Inc.
41  * Copyright 2019 Joyent, Inc.
42  * Copyright 2022 Oxide Computer Company
43  */
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include <sys/param.h>
49 #include <sys/sysctl.h>
50 #include <sys/ioctl.h>
51 #ifdef	__FreeBSD__
52 #include <sys/linker.h>
53 #endif
54 #include <sys/mman.h>
55 #include <sys/module.h>
56 #include <sys/_iovec.h>
57 #include <sys/cpuset.h>
58 
59 #include <x86/segments.h>
60 #include <machine/specialreg.h>
61 
62 #include <errno.h>
63 #ifdef	__FreeBSD__
64 #include <stdbool.h>
65 #endif
66 #include <stdio.h>
67 #include <stdlib.h>
68 #include <assert.h>
69 #include <string.h>
70 #include <fcntl.h>
71 #include <unistd.h>
72 
73 #include <libutil.h>
74 
75 #ifdef	__FreeBSD__
76 #include <vm/vm.h>
77 #endif
78 #include <machine/vmm.h>
79 #include <machine/vmm_dev.h>
80 
81 #include "vmmapi.h"
82 
83 #define	MB	(1024 * 1024UL)
84 #define	GB	(1024 * 1024 * 1024UL)
85 
86 #ifndef __FreeBSD__
87 /* shim to no-op for now */
88 #define	MAP_NOCORE		0
89 #define	MAP_ALIGNED_SUPER	0
90 
91 /* Rely on PROT_NONE for guard purposes */
92 #define	MAP_GUARD		(MAP_PRIVATE | MAP_ANON | MAP_NORESERVE)
93 
94 #define	_Thread_local		__thread
95 #endif
96 
97 /*
98  * Size of the guard region before and after the virtual address space
99  * mapping the guest physical memory. This must be a multiple of the
100  * superpage size for performance reasons.
101  */
102 #define	VM_MMAP_GUARD_SIZE	(4 * MB)
103 
104 #define	PROT_RW		(PROT_READ | PROT_WRITE)
105 #define	PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)
106 
107 struct vmctx {
108 	int	fd;
109 	uint32_t lowmem_limit;
110 	int	memflags;
111 	size_t	lowmem;
112 	size_t	highmem;
113 	char	*baseaddr;
114 	char	*name;
115 };
116 
117 #ifdef	__FreeBSD__
118 #define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
119 #define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
120 #endif
121 
122 static int
123 vm_device_open(const char *name)
124 {
125 	int fd, len;
126 	char *vmfile;
127 
128 	len = strlen("/dev/vmm/") + strlen(name) + 1;
129 	vmfile = malloc(len);
130 	assert(vmfile != NULL);
131 	snprintf(vmfile, len, "/dev/vmm/%s", name);
132 
133 	/* Open the device file */
134 	fd = open(vmfile, O_RDWR, 0);
135 
136 	free(vmfile);
137 	return (fd);
138 }
139 
140 #ifdef	__FreeBSD__
141 int
142 vm_create(const char *name)
143 {
144 	/* Try to load vmm(4) module before creating a guest. */
145 	if (modfind("vmm") < 0)
146 		kldload("vmm");
147 	return (CREATE(name));
148 }
149 #else
150 static int
151 vm_do_ctl(int cmd, void *req)
152 {
153 	int ctl_fd;
154 
155 	ctl_fd = open(VMM_CTL_DEV, O_EXCL | O_RDWR);
156 	if (ctl_fd < 0) {
157 		return (-1);
158 	}
159 
160 	if (ioctl(ctl_fd, cmd, req) == -1) {
161 		int err = errno;
162 
163 		/* Do not lose ioctl errno through the close(2) */
164 		(void) close(ctl_fd);
165 		errno = err;
166 		return (-1);
167 	}
168 	(void) close(ctl_fd);
169 
170 	return (0);
171 }
172 
173 int
174 vm_create(const char *name, uint64_t flags)
175 {
176 	struct vm_create_req req;
177 
178 	(void) strncpy(req.name, name, VM_MAX_NAMELEN);
179 	req.flags = flags;
180 
181 	return (vm_do_ctl(VMM_CREATE_VM, &req));
182 }
183 #endif
184 
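/*
 * Illustrative lifecycle (a sketch, not part of the library itself),
 * using the illumos vm_create() signature; "demo" is a placeholder
 * name:
 *
 *	if (vm_create("demo", 0) == 0) {
 *		struct vmctx *ctx = vm_open("demo");
 *		if (ctx != NULL)
 *			vm_destroy(ctx);
 *	}
 */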
185 struct vmctx *
186 vm_open(const char *name)
187 {
188 	struct vmctx *vm;
189 	int saved_errno;
190 
191 	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
192 	assert(vm != NULL);
193 
194 	vm->fd = -1;
195 	vm->memflags = 0;
196 	vm->lowmem_limit = 3 * GB;
197 	vm->name = (char *)(vm + 1);
198 	strcpy(vm->name, name);
199 
200 	if ((vm->fd = vm_device_open(vm->name)) < 0)
201 		goto err;
202 
203 	return (vm);
204 err:
205 	saved_errno = errno;
206 	free(vm);
207 	errno = saved_errno;
208 	return (NULL);
209 }
210 
211 #ifdef	__FreeBSD__
212 void
213 vm_close(struct vmctx *vm)
214 {
215 	assert(vm != NULL);
216 
217 	close(vm->fd);
218 	free(vm);
219 }
220 
221 void
222 vm_destroy(struct vmctx *vm)
223 {
224 	assert(vm != NULL);
225 
226 	if (vm->fd >= 0)
227 		close(vm->fd);
228 	DESTROY(vm->name);
229 
230 	free(vm);
231 }
232 #else
233 void
234 vm_close(struct vmctx *vm)
235 {
236 	assert(vm != NULL);
237 	assert(vm->fd >= 0);
238 
239 	(void) close(vm->fd);
240 
241 	free(vm);
242 }
243 
244 void
245 vm_destroy(struct vmctx *vm)
246 {
247 	assert(vm != NULL);
248 
249 	if (vm->fd >= 0) {
250 		(void) ioctl(vm->fd, VM_DESTROY_SELF, 0);
251 		(void) close(vm->fd);
252 		vm->fd = -1;
253 	}
254 
255 	free(vm);
256 }
257 #endif
258 
259 int
260 vm_parse_memsize(const char *opt, size_t *ret_memsize)
261 {
262 	char *endptr;
263 	size_t optval;
264 	int error;
265 
266 	optval = strtoul(opt, &endptr, 0);
267 	if (*opt != '\0' && *endptr == '\0') {
268 		/*
269 		 * For the sake of backward compatibility, a memory size given
270 		 * on the command line that is less than a megabyte is
271 		 * interpreted as being in units of MB.
272 		 */
273 		if (optval < MB)
274 			optval *= MB;
275 		*ret_memsize = optval;
276 		error = 0;
277 	} else
278 		error = expand_number(opt, ret_memsize);
279 
280 	return (error);
281 }
282 
283 uint32_t
284 vm_get_lowmem_limit(struct vmctx *ctx)
285 {
286 
287 	return (ctx->lowmem_limit);
288 }
289 
290 void
291 vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
292 {
293 
294 	ctx->lowmem_limit = limit;
295 }
296 
297 void
298 vm_set_memflags(struct vmctx *ctx, int flags)
299 {
300 
301 	ctx->memflags = flags;
302 }
303 
304 int
305 vm_get_memflags(struct vmctx *ctx)
306 {
307 
308 	return (ctx->memflags);
309 }
310 
311 /*
312  * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
313  */
314 int
315 vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
316     size_t len, int prot)
317 {
318 	struct vm_memmap memmap;
319 	int error, flags;
320 
321 	memmap.gpa = gpa;
322 	memmap.segid = segid;
323 	memmap.segoff = off;
324 	memmap.len = len;
325 	memmap.prot = prot;
326 	memmap.flags = 0;
327 
328 	if (ctx->memflags & VM_MEM_F_WIRED)
329 		memmap.flags |= VM_MEMMAP_F_WIRED;
330 
331 	/*
332 	 * If this mapping already exists then don't create it again. This
333 	 * is the common case for SYSMEM mappings created by bhyveload(8).
334 	 */
335 	error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
336 	if (error == 0 && gpa == memmap.gpa) {
337 		if (segid != memmap.segid || off != memmap.segoff ||
338 		    prot != memmap.prot || flags != memmap.flags) {
339 			errno = EEXIST;
340 			return (-1);
341 		} else {
342 			return (0);
343 		}
344 	}
345 
346 	error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
347 	return (error);
348 }
349 
350 #ifdef	__FreeBSD__
351 int
352 vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr,
353     size_t *lowmem_size, size_t *highmem_size)
354 {
355 
356 	*guest_baseaddr = ctx->baseaddr;
357 	*lowmem_size = ctx->lowmem;
358 	*highmem_size = ctx->highmem;
359 	return (0);
360 }
361 #endif
362 
363 int
364 vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
365 {
366 	struct vm_munmap munmap;
367 	int error;
368 
369 	munmap.gpa = gpa;
370 	munmap.len = len;
371 
372 	error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
373 	return (error);
374 }
375 
376 int
377 vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
378     vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
379 {
380 	struct vm_memmap memmap;
381 	int error;
382 
383 	bzero(&memmap, sizeof(struct vm_memmap));
384 	memmap.gpa = *gpa;
385 	error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
386 	if (error == 0) {
387 		*gpa = memmap.gpa;
388 		*segid = memmap.segid;
389 		*segoff = memmap.segoff;
390 		*len = memmap.len;
391 		*prot = memmap.prot;
392 		*flags = memmap.flags;
393 	}
394 	return (error);
395 }
396 
397 /*
398  * Return 0 if the segments are identical and non-zero otherwise.
399  *
400  * This is slightly complicated by the fact that only device memory segments
401  * are named.
402  */
403 static int
404 cmpseg(size_t len, const char *str, size_t len2, const char *str2)
405 {
406 
407 	if (len == len2) {
408 		if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
409 			return (0);
410 	}
411 	return (-1);
412 }
413 
414 static int
415 vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
416 {
417 	struct vm_memseg memseg;
418 	size_t n;
419 	int error;
420 
421 	/*
422 	 * If the memory segment has already been created then just return.
423 	 * This is the usual case for the SYSMEM segment created by userspace
424 	 * loaders like bhyveload(8).
425 	 */
426 	error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
427 	    sizeof(memseg.name));
428 	if (error)
429 		return (error);
430 
431 	if (memseg.len != 0) {
432 		if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
433 			errno = EINVAL;
434 			return (-1);
435 		} else {
436 			return (0);
437 		}
438 	}
439 
440 	bzero(&memseg, sizeof(struct vm_memseg));
441 	memseg.segid = segid;
442 	memseg.len = len;
443 	if (name != NULL) {
444 		n = strlcpy(memseg.name, name, sizeof(memseg.name));
445 		if (n >= sizeof(memseg.name)) {
446 			errno = ENAMETOOLONG;
447 			return (-1);
448 		}
449 	}
450 
451 	error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
452 	return (error);
453 }
454 
455 int
456 vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
457     size_t bufsize)
458 {
459 	struct vm_memseg memseg;
460 	size_t n;
461 	int error;
462 
463 	memseg.segid = segid;
464 	error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
465 	if (error == 0) {
466 		*lenp = memseg.len;
467 		n = strlcpy(namebuf, memseg.name, bufsize);
468 		if (n >= bufsize) {
469 			errno = ENAMETOOLONG;
470 			error = -1;
471 		}
472 	}
473 	return (error);
474 }
475 
476 static int
477 #ifdef __FreeBSD__
478 setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
479 #else
480 setup_memory_segment(struct vmctx *ctx, int segid, vm_paddr_t gpa, size_t len,
481     char *base)
482 #endif
483 {
484 	char *ptr;
485 	int error, flags;
486 
487 	/* Map 'len' bytes starting at 'gpa' in the guest address space */
488 #ifdef __FreeBSD__
489 	error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
490 #else
491 	/*
492 	 * As we use two segments for lowmem/highmem, the offset within the
493 	 * segment is 0 on illumos.
494 	 */
495 	error = vm_mmap_memseg(ctx, gpa, segid, 0, len, PROT_ALL);
496 #endif
497 	if (error)
498 		return (error);
499 
500 	flags = MAP_SHARED | MAP_FIXED;
501 	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
502 		flags |= MAP_NOCORE;
503 
504 	/* mmap into the process address space on the host */
505 	ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
506 	if (ptr == MAP_FAILED)
507 		return (-1);
508 
509 	return (0);
510 }
511 
512 int
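/*
 * Guest physical memory is backed by a single contiguous reservation
 * in the host address space:
 *
 *	[guard][lowmem: gpa 0 .. lowmem][gap to 4GB][highmem][guard]
 *
 * Each guard is VM_MMAP_GUARD_SIZE of PROT_NONE pages, and 'baseaddr'
 * points just past the leading guard, so the host mapping for a guest
 * physical address is simply baseaddr + gpa.
 */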
513 vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
514 {
515 	size_t objsize, len;
516 	vm_paddr_t gpa;
517 	char *baseaddr, *ptr;
518 	int error;
519 
520 	assert(vms == VM_MMAP_ALL);
521 
522 	/*
523 	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
524 	 * create another 'highmem' segment above 4GB for the remainder.
525 	 */
526 	if (memsize > ctx->lowmem_limit) {
527 		ctx->lowmem = ctx->lowmem_limit;
528 		ctx->highmem = memsize - ctx->lowmem_limit;
529 		objsize = 4*GB + ctx->highmem;
530 	} else {
531 		ctx->lowmem = memsize;
532 		ctx->highmem = 0;
533 		objsize = ctx->lowmem;
534 	}
535 
536 #ifdef __FreeBSD__
537 	error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
538 	if (error)
539 		return (error);
540 #endif
541 
542 	/*
543 	 * Stake out a contiguous region covering the guest physical memory
544 	 * and the adjoining guard regions.
545 	 */
546 	len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
547 	ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
548 	if (ptr == MAP_FAILED)
549 		return (-1);
550 
551 	baseaddr = ptr + VM_MMAP_GUARD_SIZE;
552 
553 #ifdef __FreeBSD__
554 	if (ctx->highmem > 0) {
555 		gpa = 4*GB;
556 		len = ctx->highmem;
557 		error = setup_memory_segment(ctx, gpa, len, baseaddr);
558 		if (error)
559 			return (error);
560 	}
561 
562 	if (ctx->lowmem > 0) {
563 		gpa = 0;
564 		len = ctx->lowmem;
565 		error = setup_memory_segment(ctx, gpa, len, baseaddr);
566 		if (error)
567 			return (error);
568 	}
569 #else
570 	if (ctx->highmem > 0) {
571 		error = vm_alloc_memseg(ctx, VM_HIGHMEM, ctx->highmem, NULL);
572 		if (error)
573 			return (error);
574 		gpa = 4*GB;
575 		len = ctx->highmem;
576 		error = setup_memory_segment(ctx, VM_HIGHMEM, gpa, len, baseaddr);
577 		if (error)
578 			return (error);
579 	}
580 
581 	if (ctx->lowmem > 0) {
582 		error = vm_alloc_memseg(ctx, VM_LOWMEM, ctx->lowmem, NULL);
583 		if (error)
584 			return (error);
585 		gpa = 0;
586 		len = ctx->lowmem;
587 		error = setup_memory_segment(ctx, VM_LOWMEM, gpa, len, baseaddr);
588 		if (error)
589 			return (error);
590 	}
591 #endif
592 
593 	ctx->baseaddr = baseaddr;
594 
595 	return (0);
596 }
597 
598 /*
599  * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
600  * the lowmem or highmem regions.
601  *
602  * In particular, return NULL if [gaddr, gaddr+len) falls in the guest MMIO region.
603  * The instruction emulation code depends on this behavior.
604  */
605 void *
606 vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
607 {
608 
609 	if (ctx->lowmem > 0) {
610 		if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
611 		    gaddr + len <= ctx->lowmem)
612 			return (ctx->baseaddr + gaddr);
613 	}
614 
615 	if (ctx->highmem > 0) {
616 		if (gaddr >= 4*GB) {
617 			if (gaddr < 4*GB + ctx->highmem &&
618 			    len <= ctx->highmem &&
619 			    gaddr + len <= 4*GB + ctx->highmem)
620 				return (ctx->baseaddr + gaddr);
621 		}
622 	}
623 
624 	return (NULL);
625 }
626 
627 #ifdef	__FreeBSD__
628 vm_paddr_t
629 vm_rev_map_gpa(struct vmctx *ctx, void *addr)
630 {
631 	vm_paddr_t offaddr;
632 
633 	offaddr = (char *)addr - ctx->baseaddr;
634 
635 	if (ctx->lowmem > 0)
636 		if (offaddr <= ctx->lowmem)
637 			return (offaddr);
638 
639 	if (ctx->highmem > 0)
640 		if (offaddr >= 4*GB && offaddr < 4*GB + ctx->highmem)
641 			return (offaddr);
642 
643 	return ((vm_paddr_t)-1);
644 }
645 
646 const char *
647 vm_get_name(struct vmctx *ctx)
648 {
649 
650 	return (ctx->name);
651 }
652 #endif /* __FreeBSD__ */
653 
654 size_t
655 vm_get_lowmem_size(struct vmctx *ctx)
656 {
657 
658 	return (ctx->lowmem);
659 }
660 
661 size_t
662 vm_get_highmem_size(struct vmctx *ctx)
663 {
664 
665 	return (ctx->highmem);
666 }
667 
668 #ifndef __FreeBSD__
669 int
670 vm_get_devmem_offset(struct vmctx *ctx, int segid, off_t *mapoff)
671 {
672 	struct vm_devmem_offset vdo;
673 	int error;
674 
675 	vdo.segid = segid;
676 	error = ioctl(ctx->fd, VM_DEVMEM_GETOFFSET, &vdo);
677 	if (error == 0)
678 		*mapoff = vdo.offset;
679 
680 	return (error);
681 }
682 #endif
683 
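/*
 * Device memory (e.g. a framebuffer) is backed by a named memory
 * segment.  On FreeBSD the segment is mapped through a per-segment
 * /dev/vmm.io/<vm>.<name> node; on illumos it is mapped through the
 * VM fd itself, at the offset reported by VM_DEVMEM_GETOFFSET.
 */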
684 void *
685 vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
686 {
687 #ifdef	__FreeBSD__
688 	char pathname[MAXPATHLEN];
689 #endif
690 	size_t len2;
691 	char *base, *ptr;
692 	int fd, error, flags;
693 	off_t mapoff;
694 
695 	fd = -1;
696 	ptr = MAP_FAILED;
697 	if (name == NULL || strlen(name) == 0) {
698 		errno = EINVAL;
699 		goto done;
700 	}
701 
702 	error = vm_alloc_memseg(ctx, segid, len, name);
703 	if (error)
704 		goto done;
705 
706 #ifdef	__FreeBSD__
707 	strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
708 	strlcat(pathname, ctx->name, sizeof(pathname));
709 	strlcat(pathname, ".", sizeof(pathname));
710 	strlcat(pathname, name, sizeof(pathname));
711 
712 	fd = open(pathname, O_RDWR);
713 	if (fd < 0)
714 		goto done;
715 #else
716 	if (vm_get_devmem_offset(ctx, segid, &mapoff) != 0)
717 		goto done;
718 #endif
719 
720 	/*
721 	 * Stake out a contiguous region covering the device memory and the
722 	 * adjoining guard regions.
723 	 */
724 	len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
725 	base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
726 	    0);
727 	if (base == MAP_FAILED)
728 		goto done;
729 
730 	flags = MAP_SHARED | MAP_FIXED;
731 	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
732 		flags |= MAP_NOCORE;
733 
734 #ifdef	__FreeBSD__
735 	/* mmap the devmem region in the host address space */
736 	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
737 #else
738 	/* mmap the devmem region in the host address space */
739 	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, ctx->fd,
740 	    mapoff);
741 #endif
742 done:
743 	if (fd >= 0)
744 		close(fd);
745 	return (ptr);
746 }
747 
748 int
749 vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
750 	    uint64_t base, uint32_t limit, uint32_t access)
751 {
752 	int error;
753 	struct vm_seg_desc vmsegdesc;
754 
755 	bzero(&vmsegdesc, sizeof(vmsegdesc));
756 	vmsegdesc.cpuid = vcpu;
757 	vmsegdesc.regnum = reg;
758 	vmsegdesc.desc.base = base;
759 	vmsegdesc.desc.limit = limit;
760 	vmsegdesc.desc.access = access;
761 
762 	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
763 	return (error);
764 }
765 
766 int
767 vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
768 	    uint64_t *base, uint32_t *limit, uint32_t *access)
769 {
770 	int error;
771 	struct vm_seg_desc vmsegdesc;
772 
773 	bzero(&vmsegdesc, sizeof(vmsegdesc));
774 	vmsegdesc.cpuid = vcpu;
775 	vmsegdesc.regnum = reg;
776 
777 	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
778 	if (error == 0) {
779 		*base = vmsegdesc.desc.base;
780 		*limit = vmsegdesc.desc.limit;
781 		*access = vmsegdesc.desc.access;
782 	}
783 	return (error);
784 }
785 
786 int
787 vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
788 {
789 	int error;
790 
791 	error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
792 	    &seg_desc->access);
793 	return (error);
794 }
795 
796 int
797 vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
798 {
799 	int error;
800 	struct vm_register vmreg;
801 
802 	bzero(&vmreg, sizeof(vmreg));
803 	vmreg.cpuid = vcpu;
804 	vmreg.regnum = reg;
805 	vmreg.regval = val;
806 
807 	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
808 	return (error);
809 }
810 
811 int
812 vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
813 {
814 	int error;
815 	struct vm_register vmreg;
816 
817 	bzero(&vmreg, sizeof(vmreg));
818 	vmreg.cpuid = vcpu;
819 	vmreg.regnum = reg;
820 
821 	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
822 	*ret_val = vmreg.regval;
823 	return (error);
824 }
825 
826 int
827 vm_set_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
828     const int *regnums, uint64_t *regvals)
829 {
830 	int error;
831 	struct vm_register_set vmregset;
832 
833 	bzero(&vmregset, sizeof(vmregset));
834 	vmregset.cpuid = vcpu;
835 	vmregset.count = count;
836 	vmregset.regnums = regnums;
837 	vmregset.regvals = regvals;
838 
839 	error = ioctl(ctx->fd, VM_SET_REGISTER_SET, &vmregset);
840 	return (error);
841 }
842 
843 int
844 vm_get_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
845     const int *regnums, uint64_t *regvals)
846 {
847 	int error;
848 	struct vm_register_set vmregset;
849 
850 	bzero(&vmregset, sizeof(vmregset));
851 	vmregset.cpuid = vcpu;
852 	vmregset.count = count;
853 	vmregset.regnums = regnums;
854 	vmregset.regvals = regvals;
855 
856 	error = ioctl(ctx->fd, VM_GET_REGISTER_SET, &vmregset);
857 	return (error);
858 }
859 
860 #ifdef	__FreeBSD__
861 int
862 vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
863 {
864 	int error;
865 	struct vm_run vmrun;
866 
867 	bzero(&vmrun, sizeof(vmrun));
868 	vmrun.cpuid = vcpu;
869 
870 	error = ioctl(ctx->fd, VM_RUN, &vmrun);
871 	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
872 	return (error);
873 }
874 #else
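/*
 * On illumos, VM_RUN takes a struct vm_entry describing how the vCPU
 * should re-enter the guest (for example, with the result of a pending
 * I/O emulation) and a pointer through which the kernel writes the
 * exit information for the caller.
 */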
875 int
876 vm_run(struct vmctx *ctx, int vcpu, const struct vm_entry *vm_entry,
877     struct vm_exit *vm_exit)
878 {
879 	struct vm_entry entry;
880 
881 	bcopy(vm_entry, &entry, sizeof (entry));
882 	entry.cpuid = vcpu;
883 	entry.exit_data = vm_exit;
884 
885 	return (ioctl(ctx->fd, VM_RUN, &entry));
886 }
887 #endif
888 
889 int
890 vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
891 {
892 	struct vm_suspend vmsuspend;
893 
894 	bzero(&vmsuspend, sizeof(vmsuspend));
895 	vmsuspend.how = how;
896 #ifndef __FreeBSD__
897 	/*
898 	 * The existing userspace does not (currently) inject targeted
899 	 * triple-fault suspend states, so it does not need to specify a source.
900 	 */
901 	vmsuspend.source = -1;
902 #endif /* __FreeBSD__ */
903 	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
904 }
905 
906 #ifdef __FreeBSD__
907 int
908 vm_reinit(struct vmctx *ctx)
909 {
910 
911 	return (ioctl(ctx->fd, VM_REINIT, 0));
912 }
913 #else
914 int
915 vm_reinit(struct vmctx *ctx, uint64_t flags)
916 {
917 	struct vm_reinit reinit = {
918 		.flags = flags
919 	};
920 
921 	return (ioctl(ctx->fd, VM_REINIT, &reinit));
922 }
923 #endif
924 
925 int
926 vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
927     uint32_t errcode, int restart_instruction)
928 {
929 	struct vm_exception exc;
930 
931 	exc.cpuid = vcpu;
932 	exc.vector = vector;
933 	exc.error_code = errcode;
934 	exc.error_code_valid = errcode_valid;
935 	exc.restart_instruction = restart_instruction;
936 
937 	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
938 }
939 
940 #ifndef __FreeBSD__
941 void
942 vm_inject_fault(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
943     int errcode)
944 {
945 	int error;
946 	struct vm_exception exc;
947 
948 	exc.cpuid = vcpu;
949 	exc.vector = vector;
950 	exc.error_code = errcode;
951 	exc.error_code_valid = errcode_valid;
952 	exc.restart_instruction = 1;
953 	error = ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc);
954 
955 	assert(error == 0);
956 }
957 #endif /* __FreeBSD__ */
958 
959 int
960 vm_apicid2vcpu(struct vmctx *ctx __unused, int apicid)
961 {
962 	/*
963 	 * The apic id associated with the 'vcpu' has the same numerical value
964 	 * as the 'vcpu' itself.
965 	 */
966 	return (apicid);
967 }
968 
969 int
970 vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
971 {
972 	struct vm_lapic_irq vmirq;
973 
974 	bzero(&vmirq, sizeof(vmirq));
975 	vmirq.cpuid = vcpu;
976 	vmirq.vector = vector;
977 
978 	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
979 }
980 
981 int
982 vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
983 {
984 	struct vm_lapic_irq vmirq;
985 
986 	bzero(&vmirq, sizeof(vmirq));
987 	vmirq.cpuid = vcpu;
988 	vmirq.vector = vector;
989 
990 	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
991 }
992 
993 int
994 vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
995 {
996 	struct vm_lapic_msi vmmsi;
997 
998 	bzero(&vmmsi, sizeof(vmmsi));
999 	vmmsi.addr = addr;
1000 	vmmsi.msg = msg;
1001 
1002 	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
1003 }
1004 
1005 int
1006 vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
1007 {
1008 	struct vm_ioapic_irq ioapic_irq;
1009 
1010 	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
1011 	ioapic_irq.irq = irq;
1012 
1013 	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
1014 }
1015 
1016 int
1017 vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
1018 {
1019 	struct vm_ioapic_irq ioapic_irq;
1020 
1021 	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
1022 	ioapic_irq.irq = irq;
1023 
1024 	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
1025 }
1026 
1027 int
1028 vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
1029 {
1030 	struct vm_ioapic_irq ioapic_irq;
1031 
1032 	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
1033 	ioapic_irq.irq = irq;
1034 
1035 	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
1036 }
1037 
1038 int
1039 vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
1040 {
1041 
1042 	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
1043 }
1044 
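/*
 * Access a device emulated in the kernel (such as the local APIC page)
 * at guest physical address 'gpa'.  'size' is in bytes and is assumed
 * to be a power of two; it is passed to the kernel as log2(size) via
 * 'access_width'.
 */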
1045 int
1046 vm_readwrite_kernemu_device(struct vmctx *ctx, int vcpu, vm_paddr_t gpa,
1047     bool write, int size, uint64_t *value)
1048 {
1049 	struct vm_readwrite_kernemu_device irp = {
1050 		.vcpuid = vcpu,
1051 		.access_width = fls(size) - 1,
1052 		.gpa = gpa,
1053 		.value = write ? *value : ~0ul,
1054 	};
1055 	long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
1056 	int rc;
1057 
1058 	rc = ioctl(ctx->fd, cmd, &irp);
1059 	if (rc == 0 && !write)
1060 		*value = irp.value;
1061 	return (rc);
1062 }
1063 
1064 int
1065 vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
1066 {
1067 	struct vm_isa_irq isa_irq;
1068 
1069 	bzero(&isa_irq, sizeof(struct vm_isa_irq));
1070 	isa_irq.atpic_irq = atpic_irq;
1071 	isa_irq.ioapic_irq = ioapic_irq;
1072 
1073 	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
1074 }
1075 
1076 int
1077 vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
1078 {
1079 	struct vm_isa_irq isa_irq;
1080 
1081 	bzero(&isa_irq, sizeof(struct vm_isa_irq));
1082 	isa_irq.atpic_irq = atpic_irq;
1083 	isa_irq.ioapic_irq = ioapic_irq;
1084 
1085 	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
1086 }
1087 
1088 int
1089 vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
1090 {
1091 	struct vm_isa_irq isa_irq;
1092 
1093 	bzero(&isa_irq, sizeof(struct vm_isa_irq));
1094 	isa_irq.atpic_irq = atpic_irq;
1095 	isa_irq.ioapic_irq = ioapic_irq;
1096 
1097 	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
1098 }
1099 
1100 int
1101 vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
1102     enum vm_intr_trigger trigger)
1103 {
1104 	struct vm_isa_irq_trigger isa_irq_trigger;
1105 
1106 	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
1107 	isa_irq_trigger.atpic_irq = atpic_irq;
1108 	isa_irq_trigger.trigger = trigger;
1109 
1110 	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
1111 }
1112 
1113 int
1114 vm_inject_nmi(struct vmctx *ctx, int vcpu)
1115 {
1116 	struct vm_nmi vmnmi;
1117 
1118 	bzero(&vmnmi, sizeof(vmnmi));
1119 	vmnmi.cpuid = vcpu;
1120 
1121 	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
1122 }
1123 
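/*
 * Capability names, indexed by enum vm_cap_type, used by
 * vm_capability_name2type() and vm_capability_type2name() to translate
 * between the string and enum forms.
 */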
1124 static const char *capstrmap[] = {
1125 	[VM_CAP_HALT_EXIT]  = "hlt_exit",
1126 	[VM_CAP_MTRAP_EXIT] = "mtrap_exit",
1127 	[VM_CAP_PAUSE_EXIT] = "pause_exit",
1128 #ifdef __FreeBSD__
1129 	[VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest",
1130 #endif
1131 	[VM_CAP_ENABLE_INVPCID] = "enable_invpcid",
1132 	[VM_CAP_BPT_EXIT] = "bpt_exit",
1133 };
1134 
1135 int
1136 vm_capability_name2type(const char *capname)
1137 {
1138 	int i;
1139 
1140 	for (i = 0; i < (int)nitems(capstrmap); i++) {
1141 		if (strcmp(capstrmap[i], capname) == 0)
1142 			return (i);
1143 	}
1144 
1145 	return (-1);
1146 }
1147 
1148 const char *
1149 vm_capability_type2name(int type)
1150 {
1151 	if (type >= 0 && type < (int)nitems(capstrmap))
1152 		return (capstrmap[type]);
1153 
1154 	return (NULL);
1155 }
1156 
1157 int
1158 vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
1159 		  int *retval)
1160 {
1161 	int error;
1162 	struct vm_capability vmcap;
1163 
1164 	bzero(&vmcap, sizeof(vmcap));
1165 	vmcap.cpuid = vcpu;
1166 	vmcap.captype = cap;
1167 
1168 	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
1169 	*retval = vmcap.capval;
1170 	return (error);
1171 }
1172 
1173 int
1174 vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
1175 {
1176 	struct vm_capability vmcap;
1177 
1178 	bzero(&vmcap, sizeof(vmcap));
1179 	vmcap.cpuid = vcpu;
1180 	vmcap.captype = cap;
1181 	vmcap.capval = val;
1182 
1183 	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
1184 }
1185 
1186 #ifdef __FreeBSD__
1187 int
1188 vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
1189 {
1190 	struct vm_pptdev pptdev;
1191 
1192 	bzero(&pptdev, sizeof(pptdev));
1193 	pptdev.bus = bus;
1194 	pptdev.slot = slot;
1195 	pptdev.func = func;
1196 
1197 	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
1198 }
1199 
1200 int
1201 vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
1202 {
1203 	struct vm_pptdev pptdev;
1204 
1205 	bzero(&pptdev, sizeof(pptdev));
1206 	pptdev.bus = bus;
1207 	pptdev.slot = slot;
1208 	pptdev.func = func;
1209 
1210 	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
1211 }
1212 
1213 int
1214 vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
1215 		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
1216 {
1217 	struct vm_pptdev_mmio pptmmio;
1218 
1219 	bzero(&pptmmio, sizeof(pptmmio));
1220 	pptmmio.bus = bus;
1221 	pptmmio.slot = slot;
1222 	pptmmio.func = func;
1223 	pptmmio.gpa = gpa;
1224 	pptmmio.len = len;
1225 	pptmmio.hpa = hpa;
1226 
1227 	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
1228 }
1229 
1230 int
1231 vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
1232 		     vm_paddr_t gpa, size_t len)
1233 {
1234 	struct vm_pptdev_mmio pptmmio;
1235 
1236 	bzero(&pptmmio, sizeof(pptmmio));
1237 	pptmmio.bus = bus;
1238 	pptmmio.slot = slot;
1239 	pptmmio.func = func;
1240 	pptmmio.gpa = gpa;
1241 	pptmmio.len = len;
1242 
1243 	return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
1244 }
1245 
1246 int
1247 vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
1248     uint64_t addr, uint64_t msg, int numvec)
1249 {
1250 	struct vm_pptdev_msi pptmsi;
1251 
1252 	bzero(&pptmsi, sizeof(pptmsi));
1253 	pptmsi.vcpu = vcpu;
1254 	pptmsi.bus = bus;
1255 	pptmsi.slot = slot;
1256 	pptmsi.func = func;
1257 	pptmsi.msg = msg;
1258 	pptmsi.addr = addr;
1259 	pptmsi.numvec = numvec;
1260 
1261 	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
1262 }
1263 
1264 int
1265 vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
1266     int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
1267 {
1268 	struct vm_pptdev_msix pptmsix;
1269 
1270 	bzero(&pptmsix, sizeof(pptmsix));
1271 	pptmsix.vcpu = vcpu;
1272 	pptmsix.bus = bus;
1273 	pptmsix.slot = slot;
1274 	pptmsix.func = func;
1275 	pptmsix.idx = idx;
1276 	pptmsix.msg = msg;
1277 	pptmsix.addr = addr;
1278 	pptmsix.vector_control = vector_control;
1279 
1280 	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
1281 }
1282 
1283 int
1284 vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func)
1285 {
1286 	struct vm_pptdev ppt;
1287 
1288 	bzero(&ppt, sizeof(ppt));
1289 	ppt.bus = bus;
1290 	ppt.slot = slot;
1291 	ppt.func = func;
1292 
1293 	return (ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt));
1294 }
1295 
1296 #else /* __FreeBSD__ */
1297 
1298 int
1299 vm_assign_pptdev(struct vmctx *ctx, int pptfd)
1300 {
1301 	struct vm_pptdev pptdev;
1302 
1303 	pptdev.pptfd = pptfd;
1304 	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
1305 }
1306 
1307 int
1308 vm_unassign_pptdev(struct vmctx *ctx, int pptfd)
1309 {
1310 	struct vm_pptdev pptdev;
1311 
1312 	pptdev.pptfd = pptfd;
1313 	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
1314 }
1315 
1316 int
1317 vm_map_pptdev_mmio(struct vmctx *ctx, int pptfd, vm_paddr_t gpa, size_t len,
1318     vm_paddr_t hpa)
1319 {
1320 	struct vm_pptdev_mmio pptmmio;
1321 
1322 	pptmmio.pptfd = pptfd;
1323 	pptmmio.gpa = gpa;
1324 	pptmmio.len = len;
1325 	pptmmio.hpa = hpa;
1326 	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
1327 }
1328 
1329 int
1330 vm_unmap_pptdev_mmio(struct vmctx *ctx, int pptfd, vm_paddr_t gpa, size_t len)
1331 {
1332 	struct vm_pptdev_mmio pptmmio;
1333 
1334 	bzero(&pptmmio, sizeof(pptmmio));
1335 	pptmmio.pptfd = pptfd;
1336 	pptmmio.gpa = gpa;
1337 	pptmmio.len = len;
1338 
1339 	return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
1340 }
1341 
1342 int
1343 vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int pptfd, uint64_t addr,
1344     uint64_t msg, int numvec)
1345 {
1346 	struct vm_pptdev_msi pptmsi;
1347 
1348 	pptmsi.vcpu = vcpu;
1349 	pptmsi.pptfd = pptfd;
1350 	pptmsi.msg = msg;
1351 	pptmsi.addr = addr;
1352 	pptmsi.numvec = numvec;
1353 	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
1354 }
1355 
1356 int
1357 vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int pptfd, int idx,
1358     uint64_t addr, uint64_t msg, uint32_t vector_control)
1359 {
1360 	struct vm_pptdev_msix pptmsix;
1361 
1362 	pptmsix.vcpu = vcpu;
1363 	pptmsix.pptfd = pptfd;
1364 	pptmsix.idx = idx;
1365 	pptmsix.msg = msg;
1366 	pptmsix.addr = addr;
1367 	pptmsix.vector_control = vector_control;
1368 	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
1369 }
1370 
1371 int
1372 vm_get_pptdev_limits(struct vmctx *ctx, int pptfd, int *msi_limit,
1373     int *msix_limit)
1374 {
1375 	struct vm_pptdev_limits pptlimits;
1376 	int error;
1377 
1378 	bzero(&pptlimits, sizeof (pptlimits));
1379 	pptlimits.pptfd = pptfd;
1380 	error = ioctl(ctx->fd, VM_GET_PPTDEV_LIMITS, &pptlimits);
1381 
1382 	*msi_limit = pptlimits.msi_limit;
1383 	*msix_limit = pptlimits.msix_limit;
1384 	return (error);
1385 }
1386 
1387 int
1388 vm_disable_pptdev_msix(struct vmctx *ctx, int pptfd)
1389 {
1390 	struct vm_pptdev pptdev;
1391 
1392 	pptdev.pptfd = pptfd;
1393 	return (ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &pptdev));
1394 }
1395 #endif /* __FreeBSD__ */
1396 
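/*
 * Returns a pointer to a thread-local buffer holding 'ret_entries'
 * counters.  The buffer is grown as needed and is overwritten by the
 * next call from the same thread, so callers must copy out any values
 * they want to keep.
 */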
1397 uint64_t *
1398 vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
1399 	     int *ret_entries)
1400 {
1401 	static _Thread_local uint64_t *stats_buf;
1402 	static _Thread_local uint32_t stats_count;
1403 	uint64_t *new_stats;
1404 	struct vm_stats vmstats;
1405 	uint32_t count, index;
1406 	bool have_stats;
1407 
1408 	have_stats = false;
1409 	vmstats.cpuid = vcpu;
1410 	count = 0;
1411 	for (index = 0;; index += nitems(vmstats.statbuf)) {
1412 		vmstats.index = index;
1413 		if (ioctl(ctx->fd, VM_STATS_IOC, &vmstats) != 0)
1414 			break;
1415 		if (stats_count < index + vmstats.num_entries) {
1416 			new_stats = reallocarray(stats_buf,
1417 			    index + vmstats.num_entries, sizeof(uint64_t));
1418 			if (new_stats == NULL) {
1419 				errno = ENOMEM;
1420 				return (NULL);
1421 			}
1422 			stats_count = index + vmstats.num_entries;
1423 			stats_buf = new_stats;
1424 		}
1425 		memcpy(stats_buf + index, vmstats.statbuf,
1426 		    vmstats.num_entries * sizeof(uint64_t));
1427 		count += vmstats.num_entries;
1428 		have_stats = true;
1429 
1430 		if (vmstats.num_entries != nitems(vmstats.statbuf))
1431 			break;
1432 	}
1433 	if (have_stats) {
1434 		if (ret_entries)
1435 			*ret_entries = count;
1436 		if (ret_tv)
1437 			*ret_tv = vmstats.tv;
1438 		return (stats_buf);
1439 	} else {
1440 		return (NULL);
1441 	}
1442 }
1443 
1444 const char *
1445 vm_get_stat_desc(struct vmctx *ctx, int index)
1446 {
1447 	static struct vm_stat_desc statdesc;
1448 
1449 	statdesc.index = index;
1450 	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
1451 		return (statdesc.desc);
1452 	else
1453 		return (NULL);
1454 }
1455 
1456 int
1457 vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
1458 {
1459 	int error;
1460 	struct vm_x2apic x2apic;
1461 
1462 	bzero(&x2apic, sizeof(x2apic));
1463 	x2apic.cpuid = vcpu;
1464 
1465 	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
1466 	*state = x2apic.state;
1467 	return (error);
1468 }
1469 
1470 int
1471 vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
1472 {
1473 	int error;
1474 	struct vm_x2apic x2apic;
1475 
1476 	bzero(&x2apic, sizeof(x2apic));
1477 	x2apic.cpuid = vcpu;
1478 	x2apic.state = state;
1479 
1480 	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);
1481 
1482 	return (error);
1483 }
1484 
1485 #ifndef __FreeBSD__
1486 int
1487 vcpu_reset(struct vmctx *vmctx, int vcpu)
1488 {
1489 	struct vm_vcpu_reset vvr;
1490 
1491 	vvr.vcpuid = vcpu;
1492 	vvr.kind = VRK_RESET;
1493 
1494 	return (ioctl(vmctx->fd, VM_RESET_CPU, &vvr));
1495 }
1496 #else /* __FreeBSD__ */
1497 /*
1498  * From Intel Vol 3a:
1499  * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
1500  */
1501 int
1502 vcpu_reset(struct vmctx *vmctx, int vcpu)
1503 {
1504 	int error;
1505 	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
1506 	uint32_t desc_access, desc_limit;
1507 	uint16_t sel;
1508 
1509 	zero = 0;
1510 
1511 	rflags = 0x2;
1512 	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
1513 	if (error)
1514 		goto done;
1515 
1516 	rip = 0xfff0;
1517 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
1518 		goto done;
1519 
1520 	/*
1521 	 * According to Intel's Software Developer's Manual, CR0 should be
1522 	 * initialized with CR0_ET | CR0_NW | CR0_CD, but that crashes some
1523 	 * guests like Windows.
1524 	 */
1525 	cr0 = CR0_NE;
1526 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
1527 		goto done;
1528 
1529 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR2, zero)) != 0)
1530 		goto done;
1531 
1532 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
1533 		goto done;
1534 
1535 	cr4 = 0;
1536 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
1537 		goto done;
1538 
1539 	/*
1540 	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
1541 	 */
1542 	desc_base = 0xffff0000;
1543 	desc_limit = 0xffff;
1544 	desc_access = 0x0093;
1545 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
1546 			    desc_base, desc_limit, desc_access);
1547 	if (error)
1548 		goto done;
1549 
1550 	sel = 0xf000;
1551 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
1552 		goto done;
1553 
1554 	/*
1555 	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
1556 	 */
1557 	desc_base = 0;
1558 	desc_limit = 0xffff;
1559 	desc_access = 0x0093;
1560 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
1561 			    desc_base, desc_limit, desc_access);
1562 	if (error)
1563 		goto done;
1564 
1565 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
1566 			    desc_base, desc_limit, desc_access);
1567 	if (error)
1568 		goto done;
1569 
1570 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
1571 			    desc_base, desc_limit, desc_access);
1572 	if (error)
1573 		goto done;
1574 
1575 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
1576 			    desc_base, desc_limit, desc_access);
1577 	if (error)
1578 		goto done;
1579 
1580 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
1581 			    desc_base, desc_limit, desc_access);
1582 	if (error)
1583 		goto done;
1584 
1585 	sel = 0;
1586 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
1587 		goto done;
1588 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
1589 		goto done;
1590 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
1591 		goto done;
1592 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
1593 		goto done;
1594 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
1595 		goto done;
1596 
1597 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_EFER, zero)) != 0)
1598 		goto done;
1599 
1600 	/* General purpose registers */
1601 	rdx = 0xf00;
1602 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
1603 		goto done;
1604 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
1605 		goto done;
1606 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
1607 		goto done;
1608 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
1609 		goto done;
1610 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
1611 		goto done;
1612 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
1613 		goto done;
1614 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
1615 		goto done;
1616 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
1617 		goto done;
1618 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R8, zero)) != 0)
1619 		goto done;
1620 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R9, zero)) != 0)
1621 		goto done;
1622 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R10, zero)) != 0)
1623 		goto done;
1624 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R11, zero)) != 0)
1625 		goto done;
1626 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R12, zero)) != 0)
1627 		goto done;
1628 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R13, zero)) != 0)
1629 		goto done;
1630 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R14, zero)) != 0)
1631 		goto done;
1632 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R15, zero)) != 0)
1633 		goto done;
1634 
1635 	/* GDTR, IDTR */
1636 	desc_base = 0;
1637 	desc_limit = 0xffff;
1638 	desc_access = 0;
1639 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
1640 			    desc_base, desc_limit, desc_access);
1641 	if (error != 0)
1642 		goto done;
1643 
1644 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
1645 			    desc_base, desc_limit, desc_access);
1646 	if (error != 0)
1647 		goto done;
1648 
1649 	/* TR */
1650 	desc_base = 0;
1651 	desc_limit = 0xffff;
1652 	desc_access = 0x0000008b;
1653 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
1654 	if (error)
1655 		goto done;
1656 
1657 	sel = 0;
1658 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
1659 		goto done;
1660 
1661 	/* LDTR */
1662 	desc_base = 0;
1663 	desc_limit = 0xffff;
1664 	desc_access = 0x00000082;
1665 	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
1666 			    desc_limit, desc_access);
1667 	if (error)
1668 		goto done;
1669 
1670 	sel = 0;
1671 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, sel)) != 0)
1672 		goto done;
1673 
1674 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DR6,
1675 		 0xffff0ff0)) != 0)
1676 		goto done;
1677 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DR7, 0x400)) !=
1678 	    0)
1679 		goto done;
1680 
1681 	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_INTR_SHADOW,
1682 		 zero)) != 0)
1683 		goto done;
1684 
1685 	error = 0;
1686 done:
1687 	return (error);
1688 }
1689 #endif /* __FreeBSD__ */
1690 
1691 int
1692 vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
1693 {
1694 	int error, i;
1695 	struct vm_gpa_pte gpapte;
1696 
1697 	bzero(&gpapte, sizeof(gpapte));
1698 	gpapte.gpa = gpa;
1699 
1700 	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);
1701 
1702 	if (error == 0) {
1703 		*num = gpapte.ptenum;
1704 		for (i = 0; i < gpapte.ptenum; i++)
1705 			pte[i] = gpapte.pte[i];
1706 	}
1707 
1708 	return (error);
1709 }
1710 
1711 int
1712 vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
1713 {
1714 	int error;
1715 	struct vm_hpet_cap cap;
1716 
1717 	bzero(&cap, sizeof(struct vm_hpet_cap));
1718 	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
1719 	if (capabilities != NULL)
1720 		*capabilities = cap.capabilities;
1721 	return (error);
1722 }
1723 
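/*
 * Translate guest linear address 'gla' to a guest physical address
 * using the supplied paging parameters.  On success, *fault indicates
 * whether the walk faulted; the _NOFAULT variant below performs the
 * same walk without updating any guest fault state.
 */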
1724 int
1725 vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
1726     uint64_t gla, int prot, uint64_t *gpa, int *fault)
1727 {
1728 	struct vm_gla2gpa gg;
1729 	int error;
1730 
1731 	bzero(&gg, sizeof(struct vm_gla2gpa));
1732 	gg.vcpuid = vcpu;
1733 	gg.prot = prot;
1734 	gg.gla = gla;
1735 	gg.paging = *paging;
1736 
1737 	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
1738 	if (error == 0) {
1739 		*fault = gg.fault;
1740 		*gpa = gg.gpa;
1741 	}
1742 	return (error);
1743 }
1744 
1745 int
1746 vm_gla2gpa_nofault(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
1747     uint64_t gla, int prot, uint64_t *gpa, int *fault)
1748 {
1749 	struct vm_gla2gpa gg;
1750 	int error;
1751 
1752 	bzero(&gg, sizeof(struct vm_gla2gpa));
1753 	gg.vcpuid = vcpu;
1754 	gg.prot = prot;
1755 	gg.gla = gla;
1756 	gg.paging = *paging;
1757 
1758 	error = ioctl(ctx->fd, VM_GLA2GPA_NOFAULT, &gg);
1759 	if (error == 0) {
1760 		*fault = gg.fault;
1761 		*gpa = gg.gpa;
1762 	}
1763 	return (error);
1764 }
1765 
1766 #ifndef min
1767 #define	min(a,b)	(((a) < (b)) ? (a) : (b))
1768 #endif
1769 
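/*
 * Translate the guest linear range [gla, gla + len) into host-virtual
 * iovecs, splitting at guest page boundaries since linearly contiguous
 * guest addresses need not be contiguous in the host mapping.
 *
 * Example (illustrative; 'paging', 'buf' and 'size' are placeholders):
 *
 *	struct iovec iov[2];
 *	int fault;
 *
 *	if (vm_copy_setup(ctx, vcpu, &paging, gla, size, PROT_READ,
 *	    iov, 2, &fault) == 0 && !fault)
 *		vm_copyin(ctx, vcpu, iov, &buf, size);
 */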
1770 int
1771 vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
1772     uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
1773     int *fault)
1774 {
1775 	void *va;
1776 	uint64_t gpa, off;
1777 	int error, i, n;
1778 
1779 	for (i = 0; i < iovcnt; i++) {
1780 		iov[i].iov_base = 0;
1781 		iov[i].iov_len = 0;
1782 	}
1783 
1784 	while (len) {
1785 		assert(iovcnt > 0);
1786 		error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
1787 		if (error || *fault)
1788 			return (error);
1789 
1790 		off = gpa & PAGE_MASK;
1791 		n = MIN(len, PAGE_SIZE - off);
1792 
1793 		va = vm_map_gpa(ctx, gpa, n);
1794 		if (va == NULL)
1795 			return (EFAULT);
1796 
1797 		iov->iov_base = va;
1798 		iov->iov_len = n;
1799 		iov++;
1800 		iovcnt--;
1801 
1802 		gla += n;
1803 		len -= n;
1804 	}
1805 	return (0);
1806 }
1807 
1808 void
1809 vm_copy_teardown(struct vmctx *ctx __unused, int vcpu __unused,
1810     struct iovec *iov __unused, int iovcnt __unused)
1811 {
1812 }
1813 
1814 void
1815 vm_copyin(struct vmctx *ctx __unused, int vcpu __unused, struct iovec *iov,
1816     void *vp, size_t len)
1817 {
1818 	const char *src;
1819 	char *dst;
1820 	size_t n;
1821 
1822 	dst = vp;
1823 	while (len) {
1824 		assert(iov->iov_len);
1825 		n = min(len, iov->iov_len);
1826 		src = iov->iov_base;
1827 		bcopy(src, dst, n);
1828 
1829 		iov++;
1830 		dst += n;
1831 		len -= n;
1832 	}
1833 }
1834 
1835 void
1836 vm_copyout(struct vmctx *ctx __unused, int vcpu __unused, const void *vp,
1837     struct iovec *iov, size_t len)
1838 {
1839 	const char *src;
1840 	char *dst;
1841 	size_t n;
1842 
1843 	src = vp;
1844 	while (len) {
1845 		assert(iov->iov_len);
1846 		n = min(len, iov->iov_len);
1847 		dst = iov->iov_base;
1848 		bcopy(src, dst, n);
1849 
1850 		iov++;
1851 		src += n;
1852 		len -= n;
1853 	}
1854 }
1855 
1856 static int
1857 vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
1858 {
1859 	struct vm_cpuset vm_cpuset;
1860 	int error;
1861 
1862 	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
1863 	vm_cpuset.which = which;
1864 	vm_cpuset.cpusetsize = sizeof(cpuset_t);
1865 	vm_cpuset.cpus = cpus;
1866 
1867 	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
1868 	return (error);
1869 }
1870 
1871 int
1872 vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
1873 {
1874 
1875 	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
1876 }
1877 
1878 #ifdef __FreeBSD__
1879 int
1880 vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
1881 {
1882 
1883 	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
1884 }
1885 #endif /* __FreeBSD__ */
1886 
1887 int
1888 vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
1889 {
1890 
1891 	return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
1892 }
1893 
1894 int
1895 vm_activate_cpu(struct vmctx *ctx, int vcpu)
1896 {
1897 	struct vm_activate_cpu ac;
1898 	int error;
1899 
1900 	bzero(&ac, sizeof(struct vm_activate_cpu));
1901 	ac.vcpuid = vcpu;
1902 	error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
1903 	return (error);
1904 }
1905 
1906 int
1907 vm_suspend_cpu(struct vmctx *ctx, int vcpu)
1908 {
1909 	struct vm_activate_cpu ac;
1910 	int error;
1911 
1912 	bzero(&ac, sizeof(struct vm_activate_cpu));
1913 	ac.vcpuid = vcpu;
1914 	error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
1915 	return (error);
1916 }
1917 
1918 int
1919 vm_resume_cpu(struct vmctx *ctx, int vcpu)
1920 {
1921 	struct vm_activate_cpu ac;
1922 	int error;
1923 
1924 	bzero(&ac, sizeof(struct vm_activate_cpu));
1925 	ac.vcpuid = vcpu;
1926 	error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
1927 	return (error);
1928 }
1929 
1930 int
1931 vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
1932 {
1933 	struct vm_intinfo vmii;
1934 	int error;
1935 
1936 	bzero(&vmii, sizeof(struct vm_intinfo));
1937 	vmii.vcpuid = vcpu;
1938 	error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
1939 	if (error == 0) {
1940 		*info1 = vmii.info1;
1941 		*info2 = vmii.info2;
1942 	}
1943 	return (error);
1944 }
1945 
1946 int
1947 vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
1948 {
1949 	struct vm_intinfo vmii;
1950 	int error;
1951 
1952 	bzero(&vmii, sizeof(struct vm_intinfo));
1953 	vmii.vcpuid = vcpu;
1954 	vmii.info1 = info1;
1955 	error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
1956 	return (error);
1957 }
1958 
1959 int
1960 vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
1961 {
1962 	struct vm_rtc_data rtcdata;
1963 	int error;
1964 
1965 	bzero(&rtcdata, sizeof(struct vm_rtc_data));
1966 	rtcdata.offset = offset;
1967 	rtcdata.value = value;
1968 	error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
1969 	return (error);
1970 }
1971 
1972 int
1973 vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
1974 {
1975 	struct vm_rtc_data rtcdata;
1976 	int error;
1977 
1978 	bzero(&rtcdata, sizeof(struct vm_rtc_data));
1979 	rtcdata.offset = offset;
1980 	error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
1981 	if (error == 0)
1982 		*retval = rtcdata.value;
1983 	return (error);
1984 }
1985 
1986 #ifdef __FreeBSD__
1987 int
1988 vm_rtc_settime(struct vmctx *ctx, time_t secs)
1989 {
1990 	struct vm_rtc_time rtctime;
1991 	int error;
1992 
1993 	bzero(&rtctime, sizeof(struct vm_rtc_time));
1994 	rtctime.secs = secs;
1995 	error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
1996 	return (error);
1997 }
1998 
1999 int
2000 vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
2001 {
2002 	struct vm_rtc_time rtctime;
2003 	int error;
2004 
2005 	bzero(&rtctime, sizeof(struct vm_rtc_time));
2006 	error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
2007 	if (error == 0)
2008 		*secs = rtctime.secs;
2009 	return (error);
2010 }
2011 #else /* __FreeBSD__ */
2012 
2013 int
2014 vm_rtc_settime(struct vmctx *ctx, const timespec_t *ts)
2015 {
2016 	return (ioctl(ctx->fd, VM_RTC_SETTIME, ts));
2017 }
2018 
2019 int
2020 vm_rtc_gettime(struct vmctx *ctx, timespec_t *ts)
2021 {
2022 	return (ioctl(ctx->fd, VM_RTC_GETTIME, ts));
2023 }
2024 
2025 #endif /* __FreeBSD__ */
2026 
2027 int
2028 vm_restart_instruction(void *arg, int vcpu)
2029 {
2030 	struct vmctx *ctx = arg;
2031 
2032 	return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
2033 }
2034 
2035 int
2036 vm_set_topology(struct vmctx *ctx,
2037     uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
2038 {
2039 	struct vm_cpu_topology topology;
2040 
2041 	bzero(&topology, sizeof (struct vm_cpu_topology));
2042 	topology.sockets = sockets;
2043 	topology.cores = cores;
2044 	topology.threads = threads;
2045 	topology.maxcpus = maxcpus;
2046 	return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
2047 }
2048 
2049 int
2050 vm_get_topology(struct vmctx *ctx,
2051     uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
2052 {
2053 	struct vm_cpu_topology topology;
2054 	int error;
2055 
2056 	bzero(&topology, sizeof (struct vm_cpu_topology));
2057 	error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
2058 	if (error == 0) {
2059 		*sockets = topology.sockets;
2060 		*cores = topology.cores;
2061 		*threads = topology.threads;
2062 		*maxcpus = topology.maxcpus;
2063 	}
2064 	return (error);
2065 }
2066 
2067 int
2068 vm_get_device_fd(struct vmctx *ctx)
2069 {
2070 
2071 	return (ctx->fd);
2072 }
2073 
2074 #ifndef __FreeBSD__
2075 int
2076 vm_pmtmr_set_location(struct vmctx *ctx, uint16_t ioport)
2077 {
2078 	return (ioctl(ctx->fd, VM_PMTMR_LOCATE, ioport));
2079 }
2080 
2081 int
2082 vm_wrlock_cycle(struct vmctx *ctx)
2083 {
2084 	if (ioctl(ctx->fd, VM_WRLOCK_CYCLE, 0) != 0) {
2085 		return (errno);
2086 	}
2087 	return (0);
2088 }
2089 
2090 int
2091 vm_get_run_state(struct vmctx *ctx, int vcpu, enum vcpu_run_state *state,
2092     uint8_t *sipi_vector)
2093 {
2094 	struct vm_run_state data;
2095 
2096 	data.vcpuid = vcpu;
2097 	if (ioctl(ctx->fd, VM_GET_RUN_STATE, &data) != 0) {
2098 		return (errno);
2099 	}
2100 
2101 	*state = data.state;
2102 	*sipi_vector = data.sipi_vector;
2103 	return (0);
2104 }
2105 
2106 int
2107 vm_set_run_state(struct vmctx *ctx, int vcpu, enum vcpu_run_state state,
2108     uint8_t sipi_vector)
2109 {
2110 	struct vm_run_state data;
2111 
2112 	data.vcpuid = vcpu;
2113 	data.state = state;
2114 	data.sipi_vector = sipi_vector;
2115 	if (ioctl(ctx->fd, VM_SET_RUN_STATE, &data) != 0) {
2116 		return (errno);
2117 	}
2118 
2119 	return (0);
2120 }
2121 
2122 int
2123 vm_vcpu_barrier(struct vmctx *ctx, int vcpu)
2124 {
2125 	if (ioctl(ctx->fd, VM_VCPU_BARRIER, vcpu) != 0) {
2126 		return (errno);
2127 	}
2128 
2129 	return (0);
2130 }
2131 #endif /* __FreeBSD__ */
2132 
2133 #ifdef __FreeBSD__
2134 const cap_ioctl_t *
2135 vm_get_ioctls(size_t *len)
2136 {
2137 	cap_ioctl_t *cmds;
2138 	/* keep in sync with machine/vmm_dev.h */
2139 	static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT,
2140 	    VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_GETNEXT,
2141 	    VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER,
2142 	    VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
2143 	    VM_SET_REGISTER_SET, VM_GET_REGISTER_SET,
2144 	    VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV,
2145 	    VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ,
2146 	    VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ,
2147 	    VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
2148 	    VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
2149 	    VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
2150 	    VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
2151 	    VM_PPTDEV_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_DISABLE_MSIX,
2152 	    VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
2153 	    VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
2154 	    VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
2155 	    VM_GLA2GPA_NOFAULT,
2156 	    VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU,
2157 	    VM_SET_INTINFO, VM_GET_INTINFO,
2158 	    VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
2159 	    VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY };
2160 
2161 	if (len == NULL) {
2162 		cmds = malloc(sizeof(vm_ioctl_cmds));
2163 		if (cmds == NULL)
2164 			return (NULL);
2165 		bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds));
2166 		return (cmds);
2167 	}
2168 
2169 	*len = nitems(vm_ioctl_cmds);
2170 	return (NULL);
2171 }
2172 #endif /* __FreeBSD__ */
2173