/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 Chris Torek <torek @ torek net>
 * All rights reserved.
 * Copyright (c) 2019 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#ifdef __FreeBSD__
#include <dev/virtio/pci/virtio_pci_legacy_var.h>
#endif

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "debug.h"
#include "pci_emul.h"
#include "virtio.h"

/*
 * Functions for dealing with generalized "virtual devices" as
 * defined by <https://www.google.com/#output=search&q=virtio+spec>
 */

/*
 * In case we decide to relax the "virtio softc comes at the
 * front of virtio-based device softc" constraint, let's use
 * this to convert.
 */
#define	DEV_SOFTC(vs)	((void *)(vs))

/*
 * Link a virtio_softc to its constants, the device softc, and
 * the PCI emulation.
 */
void
vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
		void *dev_softc, struct pci_devinst *pi,
		struct vqueue_info *queues)
{
	int i;

	/* vs and dev_softc addresses must match */
	assert((void *)vs == dev_softc);
	vs->vs_vc = vc;
	vs->vs_pi = pi;
	pi->pi_arg = vs;

	vs->vs_queues = queues;
	for (i = 0; i < vc->vc_nvq; i++) {
		queues[i].vq_vs = vs;
		queues[i].vq_num = i;
	}
}
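/*
 * Illustrative sketch (hypothetical "myblk_*" names, not part of
 * this file): a device model places its struct virtio_softc first
 * in its own softc so that the address check in vi_softc_linkup()
 * and the DEV_SOFTC() conversion both hold:
 *
 *	struct myblk_softc {
 *		struct virtio_softc mbs_vs;	// must come first
 *		struct vqueue_info mbs_vq;	// one request queue
 *		// ... device-private state ...
 *	};
 *
 *	struct myblk_softc *sc = calloc(1, sizeof(*sc));
 *	vi_softc_linkup(&sc->mbs_vs, &myblk_consts, sc, pi, &sc->mbs_vq);
 */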
/*
 * Reset device (device-wide). This erases all queues, i.e.,
 * all the queues become invalid (though we don't wipe out the
 * internal pointers, we just clear the VQ_ALLOC flag).
 *
 * It resets negotiated features to "none".
 *
 * If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
 */
void
vi_reset_dev(struct virtio_softc *vs)
{
	struct vqueue_info *vq;
	int i, nvq;

	if (vs->vs_mtx)
		assert(pthread_mutex_isowned_np(vs->vs_mtx));

	nvq = vs->vs_vc->vc_nvq;
	for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
		vq->vq_flags = 0;
		vq->vq_last_avail = 0;
		vq->vq_next_used = 0;
		vq->vq_save_used = 0;
		vq->vq_pfn = 0;
		vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
	}
	vs->vs_negotiated_caps = 0;
	vs->vs_curq = 0;
	/* vs->vs_status = 0; -- redundant */
	if (vs->vs_isr)
		pci_lintr_deassert(vs->vs_pi);
	vs->vs_isr = 0;
	vs->vs_msix_cfg_idx = VIRTIO_MSI_NO_VECTOR;
}

/*
 * Set I/O BAR (usually 0) to map PCI config registers.
 */
void
vi_set_io_bar(struct virtio_softc *vs, int barnum)
{
	size_t size;

	/*
	 * ??? should we use VIRTIO_PCI_CONFIG_OFF(0) if MSI-X is disabled?
	 * Existing code did not...
	 */
	size = VIRTIO_PCI_CONFIG_OFF(1) + vs->vs_vc->vc_cfgsize;
	pci_emul_alloc_bar(vs->vs_pi, barnum, PCIBAR_IO, size);
}

/*
 * Initialize MSI-X vector capabilities if we're to use MSI-X,
 * or MSI capabilities if not.
 *
 * We assume we want one MSI-X vector per queue, here, plus one
 * for the config vec.
 */
int
vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix)
{
	int nvec;

	if (use_msix) {
		vs->vs_flags |= VIRTIO_USE_MSIX;
		VS_LOCK(vs);
		vi_reset_dev(vs); /* set all vectors to NO_VECTOR */
		VS_UNLOCK(vs);
		nvec = vs->vs_vc->vc_nvq + 1;
		if (pci_emul_add_msixcap(vs->vs_pi, nvec, barnum))
			return (1);
	} else
		vs->vs_flags &= ~VIRTIO_USE_MSIX;

	/* Only 1 MSI vector for bhyve */
	pci_emul_add_msicap(vs->vs_pi, 1);

	/* Legacy interrupts are mandatory for virtio devices */
	pci_lintr_request(vs->vs_pi);

	return (0);
}

/*
 * Initialize the currently-selected virtio queue (vs->vs_curq).
 * The guest just gave us a page frame number, from which we can
 * calculate the addresses of the queue.
 */
void
vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
{
	struct vqueue_info *vq;
	uint64_t phys;
	size_t size;
	char *base;

	vq = &vs->vs_queues[vs->vs_curq];
	vq->vq_pfn = pfn;
	phys = (uint64_t)pfn << VRING_PFN;
	size = vring_size_aligned(vq->vq_qsize);
	base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);

	/* First page(s) are descriptors... */
	vq->vq_desc = (struct vring_desc *)base;
	base += vq->vq_qsize * sizeof(struct vring_desc);

	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
	vq->vq_avail = (struct vring_avail *)base;
	base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);

	/* Then it's rounded up to the next page... */
	base = (char *)roundup2((uintptr_t)base, VRING_ALIGN);

	/* ... and the last page(s) are the used ring. */
	vq->vq_used = (struct vring_used *)base;

	/* Mark queue as allocated, and start at 0 when we use it. */
	vq->vq_flags = VQ_ALLOC;
	vq->vq_last_avail = 0;
	vq->vq_next_used = 0;
	vq->vq_save_used = 0;
}
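/*
 * Worked example of the layout computed above, assuming the usual
 * 4096-byte VRING_ALIGN and a queue size of 256:
 *
 *	descriptors:	256 * 16 = 4096 bytes, at offset 0
 *	avail ring:	(2 + 256 + 1) * 2 = 518 bytes, at offset 4096
 *	used ring:	starts at roundup2(4614, 4096) = 8192
 *
 * The trailing "+ 1" uint16_t past the avail ring entries is the
 * used_event field consulted when VIRTIO_RING_F_EVENT_IDX is
 * negotiated (see vq_endchains() below).
 */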
/*
 * Helper inline for vq_getchain(): record the i'th "real"
 * descriptor.
 */
static inline void
_vq_record(int i, volatile struct vring_desc *vd, struct vmctx *ctx,
    struct iovec *iov, int n_iov, uint16_t *flags)
{

	if (i >= n_iov)
		return;
	iov[i].iov_base = paddr_guest2host(ctx, vd->addr, vd->len);
	iov[i].iov_len = vd->len;
	if (flags != NULL)
		flags[i] = vd->flags;
}
#define	VQ_MAX_DESCRIPTORS	512	/* see below */

/*
 * Examine the chain of descriptors starting at the "next one" to
 * make sure that they describe a sensible request. If so, return
 * the number of "real" descriptors that would be needed/used in
 * acting on this request. This may be smaller than the number of
 * available descriptors, e.g., if there are two available but
 * they are two separate requests, this just returns 1. Or, it
 * may be larger: if there are indirect descriptors involved,
 * there may only be one descriptor available but it may be an
 * indirect pointing to eight more. We return 8 in this case,
 * i.e., we do not count the indirect descriptors, only the "real"
 * ones.
 *
 * Basically, this vets the "flags" and "next" field of each
 * descriptor and tells you how many are involved. Since some may
 * be indirect, this also needs the vmctx (in the pci_devinst
 * at vs->vs_pi) so that it can find indirect descriptors.
 *
 * As we process each descriptor, we copy and adjust it (guest to
 * host address wise, also using the vmctx) into the given iov[]
 * array (of the given size). If the array overflows, we stop
 * placing values into the array but keep processing descriptors,
 * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1.
 * So you, the caller, must not assume that iov[] is as big as the
 * return value (you can process the same thing twice to allocate
 * a larger iov array if needed, or supply a zero length to find
 * out how much space is needed).
 *
 * If you want to verify the WRITE flag on each descriptor, pass a
 * non-NULL "flags" pointer to an array of "uint16_t" of the same size
 * as n_iov and we'll copy each "flags" field after unwinding any
 * indirects.
 *
 * If some descriptor(s) are invalid, this prints a diagnostic message
 * and returns -1. If no descriptors are ready now it simply returns 0.
 *
 * You are assumed to have done a vq_ring_ready() if needed (note
 * that vq_has_descs() does one).
 */
int
vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
	    struct iovec *iov, int n_iov, uint16_t *flags)
{
	int i;
	u_int ndesc, n_indir;
	u_int idx, next;
	volatile struct vring_desc *vdir, *vindir, *vp;
	struct vmctx *ctx;
	struct virtio_softc *vs;
	const char *name;

	vs = vq->vq_vs;
	name = vs->vs_vc->vc_name;

	/*
	 * Note: it's the responsibility of the guest not to
	 * update vq->vq_avail->idx until all of the descriptors
	 * the guest has written are valid (including all their
	 * "next" fields and "flags").
	 *
	 * Compute (vq_avail->idx - last_avail) in integers mod 2**16.
	 * This is the number of descriptors the device has made
	 * available since the last time we updated vq->vq_last_avail.
	 *
	 * We just need to do the subtraction as an unsigned int,
	 * then trim off excess bits.
	 */
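	/*
	 * A worked example (illustrative, not from the original
	 * sources): if the guest's avail index has wrapped around
	 * to 3 while vq_last_avail is 65533, then
	 * (uint16_t)(3 - 65533) == 6, i.e., six descriptors were
	 * made available across the 16-bit wrap.
	 */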
	idx = vq->vq_last_avail;
	ndesc = (uint16_t)((u_int)vq->vq_avail->idx - idx);
	if (ndesc == 0)
		return (0);
	if (ndesc > vq->vq_qsize) {
		/* XXX need better way to diagnose issues */
		EPRINTLN(
		    "%s: ndesc (%u) out of range, driver confused?",
		    name, (u_int)ndesc);
		return (-1);
	}

	/*
	 * Now count/parse "involved" descriptors starting from
	 * the head of the chain.
	 *
	 * To prevent loops, we could be more complicated and
	 * check whether we're re-visiting a previously visited
	 * index, but we just abort if the count gets excessive.
	 */
	ctx = vs->vs_pi->pi_vmctx;
	*pidx = next = vq->vq_avail->ring[idx & (vq->vq_qsize - 1)];
	vq->vq_last_avail++;
	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->next) {
		if (next >= vq->vq_qsize) {
			EPRINTLN(
			    "%s: descriptor index %u out of range, "
			    "driver confused?",
			    name, next);
			return (-1);
		}
		vdir = &vq->vq_desc[next];
		if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) {
			_vq_record(i, vdir, ctx, iov, n_iov, flags);
			i++;
		} else if ((vs->vs_vc->vc_hv_caps &
		    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
			EPRINTLN(
			    "%s: descriptor has forbidden INDIRECT flag, "
			    "driver confused?",
			    name);
			return (-1);
		} else {
			n_indir = vdir->len / 16;
			if ((vdir->len & 0xf) || n_indir == 0) {
				EPRINTLN(
				    "%s: invalid indir len 0x%x, "
				    "driver confused?",
				    name, (u_int)vdir->len);
				return (-1);
			}
			vindir = paddr_guest2host(ctx,
			    vdir->addr, vdir->len);
			/*
			 * Indirects start at the 0th, then follow
			 * their own embedded "next"s until those run
			 * out. Each one's indirect flag must be off
			 * (we don't really have to check, could just
			 * ignore errors...).
			 */
			next = 0;
			for (;;) {
				vp = &vindir[next];
				if (vp->flags & VRING_DESC_F_INDIRECT) {
					EPRINTLN(
					    "%s: indirect desc has INDIR flag,"
					    " driver confused?",
					    name);
					return (-1);
				}
				_vq_record(i, vp, ctx, iov, n_iov, flags);
				if (++i > VQ_MAX_DESCRIPTORS)
					goto loopy;
				if ((vp->flags & VRING_DESC_F_NEXT) == 0)
					break;
				next = vp->next;
				if (next >= n_indir) {
					EPRINTLN(
					    "%s: invalid next %u > %u, "
					    "driver confused?",
					    name, (u_int)next, n_indir);
					return (-1);
				}
			}
		}
		if ((vdir->flags & VRING_DESC_F_NEXT) == 0)
			return (i);
	}
loopy:
	EPRINTLN(
	    "%s: descriptor loop? count > %d - driver confused?",
	    name, i);
	return (-1);
}
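/*
 * A minimal sketch of the intended calling pattern, assuming a
 * hypothetical device whose requests fit in 8 segments; this loop
 * would normally live in the device's queue-notify handler:
 *
 *	struct iovec iov[8];
 *	uint16_t flags[8], idx;
 *	int n;
 *
 *	while (vq_has_descs(vq)) {
 *		n = vq_getchain(vq, &idx, iov, 8, flags);
 *		if (n <= 0)
 *			break;
 *		// ... perform the I/O described by iov[0..n-1],
 *		// honoring the WRITE bit in flags[] ...
 *		vq_relchain(vq, idx, iolen);	// iolen = bytes written
 *	}
 *	vq_endchains(vq, 1);
 */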
/*
 * Return the first n_chains request chains back to the available queue.
 *
 * (These chains are the ones you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_retchains(struct vqueue_info *vq, uint16_t n_chains)
{

	vq->vq_last_avail -= n_chains;
}

void
vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	volatile struct vring_used *vuh;
	volatile struct vring_used_elem *vue;
	uint16_t mask;

	/*
	 * Notes:
	 *  - mask is N-1 where N is a power of 2 so computes x % N
	 *  - vuh points to the "used" data shared with guest
	 *  - vue points to the "used" ring entry we want to update
	 */
	mask = vq->vq_qsize - 1;
	vuh = vq->vq_used;

	vue = &vuh->ring[vq->vq_next_used++ & mask];
	vue->id = idx;
	vue->len = iolen;
}

void
vq_relchain_publish(struct vqueue_info *vq)
{
	/*
	 * Ensure the used descriptor is visible before updating the index.
	 * This is necessary on ISAs with memory ordering less strict than
	 * x86 (and even on x86 to act as a compiler barrier).
	 */
	atomic_thread_fence_rel();
	vq->vq_used->idx = vq->vq_next_used;
}

/*
 * Return specified request chain to the guest, setting its I/O length
 * to the provided value.
 *
 * (This chain is the one you handled when you called vq_getchain()
 * and used its positive return value.)
 */
void
vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
{
	vq_relchain_prepare(vq, idx, iolen);
	vq_relchain_publish(vq);
}

/*
 * Driver has finished processing "available" chains and calling
 * vq_relchain on each one. If driver used all the available
 * chains, used_all_avail should be set.
 *
 * If the "used" index moved we may need to inform the guest, i.e.,
 * deliver an interrupt. Even if the used index did NOT move we
 * may need to deliver an interrupt, if the avail ring is empty and
 * we are supposed to interrupt on empty.
 *
 * Note that used_all_avail is provided by the caller because it's
 * a snapshot of the ring state at the point the caller decided to
 * finish interrupt processing -- it's possible that descriptors
 * became available after that point. (It's also typically a
 * constant 1/True as well.)
 */
void
vq_endchains(struct vqueue_info *vq, int used_all_avail)
{
	struct virtio_softc *vs;
	uint16_t event_idx, new_idx, old_idx;
	int intr;

	/*
	 * Interrupt generation: if we're using EVENT_IDX,
	 * interrupt if we've crossed the event threshold.
	 * Otherwise interrupt is generated if we added "used" entries,
	 * but suppressed by VRING_AVAIL_F_NO_INTERRUPT.
	 *
	 * In any case, though, if NOTIFY_ON_EMPTY is set and the
	 * entire avail was processed, we need to interrupt always.
	 */
	vs = vq->vq_vs;
	old_idx = vq->vq_save_used;
	vq->vq_save_used = new_idx = vq->vq_used->idx;

	/*
	 * Use full memory barrier between "idx" store from preceding
	 * vq_relchain() call and the loads from VQ_USED_EVENT_IDX() or
	 * "flags" field below.
	 */
	atomic_thread_fence_seq_cst();
	if (used_all_avail &&
	    (vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
		intr = 1;
	else if (vs->vs_negotiated_caps & VIRTIO_RING_F_EVENT_IDX) {
		event_idx = VQ_USED_EVENT_IDX(vq);
		/*
		 * This calculation is per docs and the kernel
		 * (see src/sys/dev/virtio/virtio_ring.h).
		 */
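		/*
		 * Worked example (illustrative): suppose old_idx is
		 * 10, new_idx is 14, and the guest set the event
		 * index to 12. Then 14 - 12 - 1 == 1 and
		 * 14 - 10 == 4; since 1 < 4 the threshold was
		 * crossed and an interrupt is due. Had event_idx
		 * been 14, 14 - 14 - 1 would wrap to 65535, which
		 * is not < 4, so the interrupt is suppressed.
		 */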
		intr = (uint16_t)(new_idx - event_idx - 1) <
		    (uint16_t)(new_idx - old_idx);
	} else {
		intr = new_idx != old_idx &&
		    !(vq->vq_avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
	}
	if (intr)
		vq_interrupt(vs, vq);
}

/* Note: these are in sorted order to make for a fast search */
static struct config_reg {
	uint16_t	cr_offset;	/* register offset */
	uint8_t		cr_size;	/* size (bytes) */
	uint8_t		cr_ro;		/* true => reg is read only */
	const char	*cr_name;	/* name of reg */
} config_regs[] = {
	{ VIRTIO_PCI_HOST_FEATURES,	4, 1, "HOST_FEATURES" },
	{ VIRTIO_PCI_GUEST_FEATURES,	4, 0, "GUEST_FEATURES" },
	{ VIRTIO_PCI_QUEUE_PFN,		4, 0, "QUEUE_PFN" },
	{ VIRTIO_PCI_QUEUE_NUM,		2, 1, "QUEUE_NUM" },
	{ VIRTIO_PCI_QUEUE_SEL,		2, 0, "QUEUE_SEL" },
	{ VIRTIO_PCI_QUEUE_NOTIFY,	2, 0, "QUEUE_NOTIFY" },
	{ VIRTIO_PCI_STATUS,		1, 0, "STATUS" },
	{ VIRTIO_PCI_ISR,		1, 0, "ISR" },
	{ VIRTIO_MSI_CONFIG_VECTOR,	2, 0, "CONFIG_VECTOR" },
	{ VIRTIO_MSI_QUEUE_VECTOR,	2, 0, "QUEUE_VECTOR" },
};

static inline struct config_reg *
vi_find_cr(int offset)
{
	u_int hi, lo, mid;
	struct config_reg *cr;

	lo = 0;
	hi = sizeof(config_regs) / sizeof(*config_regs) - 1;
	while (hi >= lo) {
		mid = (hi + lo) >> 1;
		cr = &config_regs[mid];
		if (cr->cr_offset == offset)
			return (cr);
		if (cr->cr_offset < offset)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return (NULL);
}
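/*
 * For reference (legacy virtio PCI layout): the standard registers
 * above occupy the first 20 bytes of the I/O BAR, or 24 bytes when
 * MSI-X is enabled, since the two 16-bit MSI-X vector registers are
 * then appended. VIRTIO_PCI_CONFIG_OFF(msix_enabled) captures this,
 * so device-specific config starts at offset 24 (MSI-X) or 20
 * (no MSI-X).
 */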
/*
 * Handle pci config space reads.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
uint64_t
vi_pci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	    int baridx, uint64_t offset, int size)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	uint32_t value;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			return (pci_emul_msix_tread(pi, offset, size));
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;
	value = size == 1 ? 0xff : size == 2 ? 0xffff : 0xffffffff;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	virtio_config_size = VIRTIO_PCI_CONFIG_OFF(pci_msix_enabled(pi));

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 * If that fails, fall into general code.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		error = (*vc->vc_cfgread)(DEV_SOFTC(vs), newoff, size, &value);
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size) {
		if (cr != NULL) {
			/* offset must be OK, so size must be bad */
			EPRINTLN(
			    "%s: read from %s: bad size %d",
			    name, cr->cr_name, size);
		} else {
			EPRINTLN(
			    "%s: read from bad offset/size %jd/%d",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		value = vc->vc_hv_caps;
		break;
	case VIRTIO_PCI_GUEST_FEATURES:
		value = vs->vs_negotiated_caps;
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		if (vs->vs_curq < vc->vc_nvq)
			value = vs->vs_queues[vs->vs_curq].vq_pfn;
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_qsize : 0;
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		value = vs->vs_curq;
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		value = 0; /* XXX */
		break;
	case VIRTIO_PCI_STATUS:
		value = vs->vs_status;
		break;
	case VIRTIO_PCI_ISR:
		value = vs->vs_isr;
		vs->vs_isr = 0; /* a read clears this flag */
		if (value)
			pci_lintr_deassert(pi);
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		value = vs->vs_msix_cfg_idx;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		value = vs->vs_curq < vc->vc_nvq ?
		    vs->vs_queues[vs->vs_curq].vq_msix_idx :
		    VIRTIO_MSI_NO_VECTOR;
		break;
	}
done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
	return (value);
}
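/*
 * Dispatch example (hypothetical device, not from this file): for a
 * device with vc_cfgsize == 6 and MSI-X enabled, a 4-byte guest read
 * at BAR offset 24 computes newoff = 24 - 24 = 0 and is forwarded to
 * vc_cfgread for bytes 0..3 of the device-specific config; a read at
 * offset 28 would fail the "newoff + size > max" check (4 + 4 > 6)
 * and fall into the standard-register path instead.
 */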
/*
 * Handle pci config space writes.
 * If it's to the MSI-X info, do that.
 * If it's part of the virtio standard stuff, do that.
 * Otherwise dispatch to the actual driver.
 */
void
vi_pci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	     int baridx, uint64_t offset, int size, uint64_t value)
{
	struct virtio_softc *vs = pi->pi_arg;
	struct vqueue_info *vq;
	struct virtio_consts *vc;
	struct config_reg *cr;
	uint64_t virtio_config_size, max;
	const char *name;
	uint32_t newoff;
	int error;

	if (vs->vs_flags & VIRTIO_USE_MSIX) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			pci_emul_msix_twrite(pi, offset, size, value);
			return;
		}
	}

	/* XXX probably should do something better than just assert() */
	assert(baridx == 0);

	if (vs->vs_mtx)
		pthread_mutex_lock(vs->vs_mtx);

	vc = vs->vs_vc;
	name = vc->vc_name;

	if (size != 1 && size != 2 && size != 4)
		goto bad;

	virtio_config_size = VIRTIO_PCI_CONFIG_OFF(pci_msix_enabled(pi));

	if (offset >= virtio_config_size) {
		/*
		 * Subtract off the standard size (including MSI-X
		 * registers if enabled) and dispatch to underlying driver.
		 */
		newoff = offset - virtio_config_size;
		max = vc->vc_cfgsize ? vc->vc_cfgsize : 0x100000000;
		if (newoff + size > max)
			goto bad;
		error = (*vc->vc_cfgwrite)(DEV_SOFTC(vs), newoff, size, value);
		if (!error)
			goto done;
	}

bad:
	cr = vi_find_cr(offset);
	if (cr == NULL || cr->cr_size != size || cr->cr_ro) {
		if (cr != NULL) {
			/* offset must be OK, wrong size and/or reg is R/O */
			if (cr->cr_size != size)
				EPRINTLN(
				    "%s: write to %s: bad size %d",
				    name, cr->cr_name, size);
			if (cr->cr_ro)
				EPRINTLN(
				    "%s: write to read-only reg %s",
				    name, cr->cr_name);
		} else {
			EPRINTLN(
			    "%s: write to bad offset/size %jd/%d",
			    name, (uintmax_t)offset, size);
		}
		goto done;
	}

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		vs->vs_negotiated_caps = value & vc->vc_hv_caps;
		if (vc->vc_apply_features)
			(*vc->vc_apply_features)(DEV_SOFTC(vs),
			    vs->vs_negotiated_caps);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vi_vq_init(vs, value);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		/*
		 * Note that the guest is allowed to select an
		 * invalid queue; we just need to return a QNUM
		 * of 0 while the bad queue is selected.
		 */
		vs->vs_curq = value;
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		if (value >= vc->vc_nvq) {
			EPRINTLN("%s: queue %d notify out of range",
			    name, (int)value);
			goto done;
		}
		vq = &vs->vs_queues[value];
		if (vq->vq_notify)
			(*vq->vq_notify)(DEV_SOFTC(vs), vq);
		else if (vc->vc_qnotify)
			(*vc->vc_qnotify)(DEV_SOFTC(vs), vq);
		else
			EPRINTLN(
			    "%s: qnotify queue %d: missing vq/vc notify",
			    name, (int)value);
		break;
	case VIRTIO_PCI_STATUS:
		vs->vs_status = value;
		if (value == 0)
			(*vc->vc_reset)(DEV_SOFTC(vs));
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		vs->vs_msix_cfg_idx = value;
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		if (vs->vs_curq >= vc->vc_nvq)
			goto bad_qindex;
		vq = &vs->vs_queues[vs->vs_curq];
		vq->vq_msix_idx = value;
		break;
	}
	goto done;

bad_qindex:
	EPRINTLN(
	    "%s: write config reg %s: curq %d >= max %d",
	    name, cr->cr_name, vs->vs_curq, vc->vc_nvq);
done:
	if (vs->vs_mtx)
		pthread_mutex_unlock(vs->vs_mtx);
}
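/*
 * Putting it together -- a minimal sketch (hypothetical "myblk_*"
 * names, for illustration only) of how a device model's init
 * routine is expected to use the helpers above:
 *
 *	vi_softc_linkup(&sc->mbs_vs, &myblk_consts, sc, pi, &sc->mbs_vq);
 *	sc->mbs_vs.vs_mtx = &sc->mbs_mtx;
 *	if (vi_intr_init(&sc->mbs_vs, 1, fbsdrun_virtio_msix()))
 *		return (1);
 *	vi_set_io_bar(&sc->mbs_vs, 0);
 *
 * Thereafter the PCI emulation routes BAR accesses to vi_pci_read()
 * and vi_pci_write() via pi->pi_arg (set up by vi_softc_linkup()),
 * and a guest QUEUE_NOTIFY write lands in the device's
 * vq_notify/vc_qnotify handler.
 */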