/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 */

#ifndef _VMM_KERNEL_H_
#define	_VMM_KERNEL_H_

#include <sys/sdt.h>
#include <x86/segments.h>
#include <sys/vmm.h>

SDT_PROVIDER_DECLARE(vmm);

struct vm;
struct vm_exception;
struct seg_desc;
struct vm_exit;
struct vie;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_client;
struct vm_object;
struct vm_guest_paging;

typedef int	(*vmm_init_func_t)(void);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void	(*vmm_resume_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm);
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, uint64_t rip);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
    uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
    uint64_t val);
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
    struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
    const struct seg_desc *desc);
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vlapic *(*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void	(*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
typedef void	(*vmi_savectx)(void *vmi, int vcpu);
typedef void	(*vmi_restorectx)(void *vmi, int vcpu);

struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;
	vmm_resume_func_t	resume;

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;
	vmi_cleanup_func_t	vmcleanup;
	vmi_get_register_t	vmgetreg;
	vmi_set_register_t	vmsetreg;
	vmi_get_desc_t		vmgetdesc;
	vmi_set_desc_t		vmsetdesc;
	vmi_get_cap_t		vmgetcap;
	vmi_set_cap_t		vmsetcap;
	vmi_vlapic_init		vlapic_init;
	vmi_vlapic_cleanup	vlapic_cleanup;

	vmi_savectx		vmsavectx;
	vmi_restorectx		vmrestorectx;
};
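/*
 * Each hardware backend exports its entry points by filling in a 'vmm_ops'
 * vector, through which the generic layer dispatches (e.g.
 * 'ops->vmrun(vmi, vcpu, rip)').  A minimal sketch of how a backend might
 * populate one (illustrative only; the 'example_*' names are hypothetical
 * and not part of this interface):
 *
 *	static int example_init(void) { ... }
 *	static void *example_vminit(struct vm *vm) { ... }
 *	static int example_vmrun(void *vmi, int vcpu, uint64_t rip) { ... }
 *
 *	struct vmm_ops vmm_ops_example = {
 *		.init	= example_init,
 *		.vminit	= example_vminit,
 *		.vmrun	= example_vmrun,
 *		// ... remaining entry points ...
 *	};
 */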
extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;

int vm_create(uint64_t flags, struct vm **retvm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm, uint64_t);
uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);

/*
 * APIs that race against hardware.
 */
void vm_track_dirty_pages(struct vm *, uint64_t, size_t, uint8_t *);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int pptfd);
int vm_unassign_pptdev(struct vm *vm, int pptfd);

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen.  This acts like a read lock on the guest memory map, since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
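/*
 * 'vm_mmap_getnext()' behaves as a cursor-style iterator: the caller seeds
 * '*gpa' and each successful call fills in the mapping at or above that
 * address.  A sketch of walking the whole map (assuming the single-vcpu
 * freeze described above is already held):
 *
 *	vm_paddr_t gpa = 0;
 *	vm_ooffset_t segoff;
 *	size_t len;
 *	int segid, prot, flags;
 *
 *	while (vm_mmap_getnext(vm, &gpa, &segid, &segoff, &len, &prot,
 *	    &flags) == 0) {
 *		// ... examine the mapping [gpa, gpa + len) ...
 *		gpa += len;
 *	}
 */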
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    const struct seg_desc *desc);
int vm_get_run_state(struct vm *vm, int vcpuid, uint32_t *state,
    uint8_t *sipi_vec);
int vm_set_run_state(struct vm *vm, int vcpuid, uint32_t state,
    uint8_t sipi_vec);
int vm_get_fpu(struct vm *vm, int vcpuid, void *buf, size_t len);
int vm_set_fpu(struct vm *vm, int vcpuid, void *buf, size_t len);
int vm_run(struct vm *vm, int vcpuid, const struct vm_entry *);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vm *vm, int vcpu);
bool vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
bool vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
int vm_inject_init(struct vm *vm, int vcpuid);
int vm_inject_sipi(struct vm *vm, int vcpuid, uint8_t vec);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
int vm_suspend_cpu(struct vm *vm, int vcpu);
int vm_resume_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
struct vie *vm_vie_ctx(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_run_state(struct vm *vm, int vcpuid, uint64_t rip);
int vm_service_mmio_read(struct vm *vm, int cpuid, uint64_t gpa,
    uint64_t *rval, int rsize);
int vm_service_mmio_write(struct vm *vm, int cpuid, uint64_t gpa,
    uint64_t wval, int wsize);

#ifdef _SYS__CPUSET_H_
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
#endif	/* _SYS__CPUSET_H_ */

bool vcpu_entry_bailout_checks(struct vm *vm, int vcpuid, uint64_t rip);
bool vcpu_run_state_pending(struct vm *vm, int vcpuid);
int vcpu_arch_reset(struct vm *vm, int vcpuid, bool init_only);

/*
 * Returns true if the device identified by bus/slot/func is expected to be a
 * PCI passthrough device; returns false otherwise.
 */
bool vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
    bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
void vcpu_block_run(struct vm *, int);
void vcpu_unblock_run(struct vm *, int);

uint64_t vcpu_tsc_offset(struct vm *vm, int vcpuid, bool phys_adj);
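/*
 * A sketch of how a caller might freeze every vcpu before modifying the
 * guest memory map, per the locking rules above (illustrative only;
 * error handling and unwind on a failed transition are omitted):
 *
 *	uint16_t i, maxcpus = vm_get_maxcpus(vm);
 *
 *	for (i = 0; i < maxcpus; i++)
 *		(void) vcpu_set_state(vm, i, VCPU_FROZEN, true);
 *	// ... call vm_mmap_memseg()/vm_munmap_memseg() ...
 *	for (i = 0; i < maxcpus; i++)
 *		(void) vcpu_set_state(vm, i, VCPU_IDLE, false);
 */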
static __inline int
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_THREAD_H
static __inline int
vcpu_should_yield(struct vm *vm, int vcpu)
{
	if (curthread->t_astflag)
		return (1);
	else if (CPU->cpu_runrun)
		return (1);
	else
		return (0);
}
#endif	/* _SYS_THREAD_H */

typedef enum vcpu_notify {
	VCPU_NOTIFY_NONE,
	VCPU_NOTIFY_APIC,	/* Posted intr notification (if possible) */
	VCPU_NOTIFY_EXIT,	/* IPI to cause VM exit */
} vcpu_notify_t;

void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid);
void vcpu_notify_event_type(struct vm *vm, int vcpuid, vcpu_notify_t);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vm_client *vm_get_vmclient(struct vm *vm, int vcpuid);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);

/*
 * Inject exception 'vector' into the guest vcpu.  This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vm *vm, int vcpuid, uint8_t vector,
    bool err_valid, uint32_t errcode, bool restart_instruction);

/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT.  The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event.  For
 * example, if the task switch emulation is triggered via a task gate then
 * it should call this function with 'intinfo=0' to indicate that the
 * external event is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);

/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest.  This function combines
 * nested events into a double or triple fault.
 *
 * Returns false if there are no events that need to be injected into the
 * guest.
 */
bool vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);
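/*
 * A sketch of how a VM-entry path might consume a pending event
 * (illustrative only; 'inject_event' stands in for backend-specific
 * injection logic and is not part of this interface):
 *
 *	uint64_t info;
 *
 *	if (vm_entry_intinfo(vm, vcpuid, &info)) {
 *		// 'info' describes a valid event, possibly the result of
 *		// merging two nested events into a double fault
 *		inject_event(vmi, vcpuid, info);
 *	}
 */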
int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1,
    uint64_t *info2);

enum vm_reg_name vm_segment_name(int seg_encoding);

struct vm_copyinfo {
	uint64_t	gpa;
	size_t		len;
	int		prot;
	void		*hva;
	void		*cookie;
};

/*
 * Set up 'copyinfo[]' to copy 'len' bytes to/from the guest linear address
 * space starting at 'gla'.  The 'prot' should be set to PROT_READ for a
 * copyin or PROT_WRITE for a copyout.
 *
 *	retval	is_fault	Interpretation
 *	  0	   0		Success
 *	  0	   1		An exception was injected into the guest
 *	EFAULT	  N/A		Unrecoverable error
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0.  The 'copyinfo[]' resources should be freed by
 * calling 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    uint_t num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    uint_t num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len);
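/*
 * A sketch of the full copyin cycle (illustrative only; 'paging' is assumed
 * to describe the vcpu's current paging mode, and 'COPY_FRAGS' is a
 * hypothetical array size chosen by the caller):
 *
 *	struct vm_copyinfo copyinfo[COPY_FRAGS];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vm, vcpuid, &paging, gla, len, PROT_READ,
 *	    copyinfo, COPY_FRAGS, &fault);
 *	if (error == 0 && fault == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, COPY_FRAGS);
 *	}
 *	// fault != 0: an exception was injected; retry at next entry
 *	// error == EFAULT: unrecoverable, no teardown required
 */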
int vcpu_trace_exceptions(struct vm *vm, int vcpuid);

void vm_inject_ud(struct vm *vm, int vcpuid);
void vm_inject_gp(struct vm *vm, int vcpuid);
void vm_inject_ac(struct vm *vm, int vcpuid, uint32_t errcode);
void vm_inject_ss(struct vm *vm, int vcpuid, uint32_t errcode);
void vm_inject_pf(struct vm *vm, int vcpuid, uint32_t errcode, uint64_t cr2);

/*
 * Both SVM and VMX have complex logic for injecting events such as exceptions
 * or interrupts into the guest.  Within those two backends, the progress of
 * event injection is tracked by event_inject_state, hopefully making it
 * easier to reason about.
 */
enum event_inject_state {
	EIS_CAN_INJECT	= 0,	/* exception/interrupt can be injected */
	EIS_EV_EXISTING	= 1,	/* blocked by existing event */
	EIS_EV_INJECTED	= 2,	/* blocked by injected event */
	EIS_GI_BLOCK	= 3,	/* blocked by guest interruptibility */

	/*
	 * Flag to request an immediate exit from VM context after event
	 * injection in order to perform more processing
	 */
	EIS_REQ_EXIT	= (1 << 15),
};

/* Possible result codes for MSR access emulation */
typedef enum vm_msr_result {
	VMR_OK		= 0,	/* successfully emulated */
	VMR_GP		= 1,	/* #GP should be injected */
	VMR_UNHANDLED	= 2,	/* handle in userspace, kernel cannot emulate */
} vm_msr_result_t;

void vmm_sol_glue_init(void);
void vmm_sol_glue_cleanup(void);

int vmm_mod_load(void);
int vmm_mod_unload(void);

void vmm_call_trap(uint64_t);

/*
 * Because of tangled headers, this is not exposed directly via the vmm_drv
 * interface, but rather mirrored as vmm_drv_iop_cb_t in vmm_drv.h.
 */
typedef int (*ioport_handler_t)(void *, bool, uint16_t, uint8_t, uint32_t *);

int vm_ioport_access(struct vm *vm, int vcpuid, bool in, uint16_t port,
    uint8_t bytes, uint32_t *val);

int vm_ioport_attach(struct vm *vm, uint16_t port, ioport_handler_t func,
    void *arg, void **cookie);
int vm_ioport_detach(struct vm *vm, void **cookie, ioport_handler_t *old_func,
    void **old_arg);

int vm_ioport_hook(struct vm *, uint16_t, ioport_handler_t, void *, void **);
void vm_ioport_unhook(struct vm *, void **);

enum vcpu_ustate {
	VU_INIT = 0,	/* initialized but has not yet attempted to run */
	VU_RUN,		/* running in guest context */
	VU_IDLE,	/* idle (HLTed, wait-for-SIPI, etc) */
	VU_EMU_KERN,	/* emulation performed in-kernel */
	VU_EMU_USER,	/* emulation performed in userspace */
	VU_SCHED,	/* off-cpu for interrupt, preempt, lock contention */
	VU_MAX
};

void vcpu_ustate_change(struct vm *, int, enum vcpu_ustate);

typedef struct vmm_kstats {
	kstat_named_t	vk_name;
} vmm_kstats_t;

typedef struct vmm_vcpu_kstats {
	kstat_named_t	vvk_vcpu;
	kstat_named_t	vvk_time_init;
	kstat_named_t	vvk_time_run;
	kstat_named_t	vvk_time_idle;
	kstat_named_t	vvk_time_emu_kern;
	kstat_named_t	vvk_time_emu_user;
	kstat_named_t	vvk_time_sched;
} vmm_vcpu_kstats_t;

#define	VMM_KSTAT_CLASS	"misc"

int vmm_kstat_update_vcpu(struct kstat *, int);

#endif /* _VMM_KERNEL_H_ */