/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013  Chris Torek <torek @ torek net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_BHYVE_VIRTIO_H_
#define	_BHYVE_VIRTIO_H_

#include <pthread_np.h>
#include <machine/atomic.h>

#include <dev/virtio/virtio.h>
#ifdef __FreeBSD__
#include <dev/virtio/virtio_ring.h>
#include <dev/virtio/pci/virtio_pci_var.h>
#endif

/*
 * These are derived from several virtio specifications.
 *
 * Some useful links:
 *    https://github.com/rustyrussell/virtio-spec
 *    http://people.redhat.com/pbonzini/virtio-spec.pdf
 */

/*
 * A virtual device has zero or more "virtual queues" (virtqueue).
 * Each virtqueue uses at least two 4096-byte pages, laid out thus:
 *
 *      +-----------------------------------------------+
 *      |    "desc":  <N> descriptors, 16 bytes each    |
 *      |   -----------------------------------------   |
 *      |   "avail":   2 uint16; <N> uint16; 1 uint16   |
 *      |   -----------------------------------------   |
 *      |              pad to 4k boundary               |
 *      +-----------------------------------------------+
 *      |   "used": 2 x uint16; <N> elems; 1 uint16     |
 *      |   -----------------------------------------   |
 *      |              pad to 4k boundary               |
 *      +-----------------------------------------------+
 *
 * The number <N> that appears here is always a power of two and is
 * limited to no more than 32768 (as it must fit in a 16-bit field).
 * If <N> is sufficiently large, the above will occupy more than
 * two pages.  In any case, all pages must be physically contiguous
 * within the guest's physical address space.
 *
 * The <N> 16-byte "desc" descriptors consist of a 64-bit guest
 * physical address <addr>, a 32-bit length <len>, a 16-bit
 * <flags>, and a 16-bit <next> field (all in guest byte order).
 *
 * There are three flags that may be set:
 *	NEXT    descriptor is chained, so use its "next" field
 *	WRITE   descriptor is for host to write into guest RAM
 *		(else host is to read from guest RAM)
 *	INDIRECT   descriptor address field is (guest physical)
 *		address of a linear array of descriptors
 *
 * Unless INDIRECT is set, <len> is the number of bytes that may
 * be read/written from guest physical address <addr>.  If
 * INDIRECT is set, WRITE is ignored and <len> provides the length
 * of the indirect descriptors (and <len> must be a multiple of
 * 16).  Note that NEXT may still be set in the main descriptor
 * pointing to the indirect, and should be set in each indirect
 * descriptor that uses the next descriptor (these should generally
 * be numbered sequentially).  However, INDIRECT must not be set
 * in the indirect descriptors.  Upon reaching an indirect descriptor
 * without a NEXT bit, control returns to the direct descriptors.
 *
 * Except inside an indirect, each <next> value must be in the
 * range [0 .. N) (i.e., the half-open interval).  (Inside an
 * indirect, each <next> must be in the range [0 .. <len>/16).)
 *
 * The "avail" data structures reside in the same pages as the
 * "desc" structures since both together are used by the device to
 * pass information to the hypervisor's virtual driver.  These
 * begin with a 16-bit <flags> field and 16-bit index <idx>, then
 * have <N> 16-bit <ring> values, followed by one final 16-bit
 * field <used_event>.  The <N> <ring> entries are simply indices
 * into the descriptor ring (and thus must meet the same
 * constraints as each <next> value).  However, <idx> is counted
 * up from 0 (initially) and simply wraps around after 65535; it
 * is taken mod <N> to find the next available entry.
 *
 * The "used" ring occupies a separate page or pages, and contains
 * values written from the virtual driver back to the guest OS.
 * This begins with a 16-bit <flags> and 16-bit <idx>, then there
 * are <N> "vring_used" elements, followed by a 16-bit <avail_event>.
 * The <N> "vring_used" elements consist of a 32-bit <id> and a
 * 32-bit <len> (vu_tlen below).  The <id> is simply the index of
 * the head of a descriptor chain the guest made available
 * earlier, and the <len> is the number of bytes actually written,
 * e.g., in the case of a network driver that provided a large
 * receive buffer but received only a small amount of data.
 *
 * The two event fields, <used_event> and <avail_event>, in the
 * avail and used rings (respectively -- note the reversal!), are
 * always provided, but are used only if the virtual device
 * negotiates the VIRTIO_RING_F_EVENT_IDX feature during feature
 * negotiation.  Similarly, both rings provide a flag --
 * VRING_AVAIL_F_NO_INTERRUPT and VRING_USED_F_NO_NOTIFY -- in
 * their <flags> field, indicating that the guest does not need an
 * interrupt, or that the hypervisor driver does not need a
 * notify, when descriptors are added to the corresponding ring.
 * (These are provided only for interrupt optimization and need
 * not be implemented.)
 */
#define	VRING_ALIGN	4096

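/*
 * For illustration only -- a minimal sketch (not part of this header,
 * and assuming a queue size "qsize" and a host-mapped base pointer
 * "base") of how the three regions described above are located; the
 * real setup lives in vi_vq_init():
 *
 *	struct vring_desc *desc = (struct vring_desc *)base;
 *	struct vring_avail *avail = (struct vring_avail *)
 *	    (base + qsize * sizeof (struct vring_desc));
 *	// "used" begins on the next VRING_ALIGN boundary after "avail"
 *	struct vring_used *used = (struct vring_used *)
 *	    (base + roundup2(qsize * sizeof (struct vring_desc) +
 *	    (2 + qsize + 1) * sizeof (uint16_t), VRING_ALIGN));
 */
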
/*
 * The address of any given virtual queue is determined by a single
 * Page Frame Number register.  The guest writes the PFN into the
 * PCI config space.  However, a device that has two or more
 * virtqueues can have a different PFN, and size, for each queue.
 * The number of queues is determinable via the PCI config space
 * VTCFG_R_QSEL register.  Writes to QSEL select the queue: 0 means
 * queue #0, 1 means queue #1, etc.  Once a queue is selected, the
 * remaining PFN and QNUM registers refer to that queue.
 *
 * QNUM is a read-only register containing a nonzero power of two
 * that indicates the (hypervisor's) queue size.  Or, if reading it
 * produces zero, the hypervisor does not have a corresponding
 * queue.  (The number of possible queues depends on the virtual
 * device.  The block device has just one; the network device
 * provides either two -- 0 = receive, 1 = transmit -- or three,
 * with 2 = control.)
 *
 * PFN is a read/write register giving the physical page address of
 * the virtqueue in guest memory (the guest must allocate enough space
 * based on the hypervisor's provided QNUM).
 *
 * QNOTIFY is effectively write-only: when the guest writes a queue
 * number to the register, the hypervisor should scan the specified
 * virtqueue.  (Reading QNOTIFY currently always returns 0.)
 */
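
/*
 * Purely illustrative guest-side setup sequence; QSEL/QNUM/PFN stand
 * for the registers described above (not real config-space offsets),
 * and inw()/outw()/outl(), contig_alloc(), and gpa_of() are
 * hypothetical helpers:
 *
 *	outw(QSEL, 0);			// select queue #0
 *	uint16_t qsz = inw(QNUM);	// hypervisor's queue size
 *	if (qsz != 0) {
 *		void *vq = contig_alloc(vring_size_aligned(qsz));
 *		outl(PFN, gpa_of(vq) >> VRING_PFN);
 *	}
 */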

/*
 * PFN register shift amount
 */
#define	VRING_PFN		12

/*
 * PCI vendor/device IDs
 */
#define	VIRTIO_VENDOR		0x1AF4
#define	VIRTIO_DEV_NET		0x1000
#define	VIRTIO_DEV_BLOCK	0x1001
#define	VIRTIO_DEV_CONSOLE	0x1003
#define	VIRTIO_DEV_SCSI		0x1004
#define	VIRTIO_DEV_RANDOM	0x1005
#define	VIRTIO_DEV_9P		0x1009
#define	VIRTIO_DEV_INPUT	0x1052

/*
 * PCI revision IDs
 */
#define	VIRTIO_REV_INPUT	1

/*
 * PCI subvendor IDs
 */
#define	VIRTIO_SUBVEN_INPUT	0x108E

/*
 * PCI subdevice IDs
 */
#define	VIRTIO_SUBDEV_INPUT	0x1100

/* From section 2.3, "Virtqueue Configuration", of the virtio specification */
static inline int
vring_size_aligned(u_int qsz)
{
	return (roundup2(vring_size(qsz, VRING_ALIGN), VRING_ALIGN));
}

struct vmctx;
struct pci_devinst;
struct vqueue_info;

/*
 * A virtual device, with some number (possibly 0) of virtual
 * queues and some size (possibly 0) of configuration-space
 * registers private to the device.  The virtio_softc should come
 * at the front of each "derived class", so that a pointer to the
 * virtio_softc is also a pointer to the more specific, derived-
 * from-virtio driver's softc.
 *
 * Note: inside each hypervisor virtio driver, changes to these
 * data structures must be locked against other threads, if any.
 * Except for PCI config space register read/write, we assume each
 * driver does the required locking, but we need a pointer to the
 * lock (if there is one) for PCI config space read/write ops.
 *
 * When the guest reads or writes the device's config space, the
 * generic layer checks for operations on the special registers
 * described above.  If the offset of the register(s) being read
 * or written is past the CFG area (CFG0 or CFG1), the request is
 * passed on to the virtual device, after subtracting off the
 * generic-layer size.  (So, drivers can just use the offset as
 * an offset into "struct config", for instance.)
 *
 * (The virtio layer also makes sure that the read or write is to/
 * from a "good" config offset, hence vc_cfgsize, and on BAR #0.
 * However, the driver must verify the read or write size and offset
 * and that no one is writing a readonly register.)
 *
 * The BROKED flag ("this thing done gone and broked") is for future
 * use.
 */
#define	VIRTIO_USE_MSIX		0x01
#define	VIRTIO_EVENT_IDX	0x02	/* use the event-index values */
#define	VIRTIO_BROKED		0x08	/* ??? */

struct virtio_softc {
	struct virtio_consts *vs_vc;	/* constants (see below) */
	int	vs_flags;		/* VIRTIO_* flags from above */
	pthread_mutex_t *vs_mtx;	/* POSIX mutex, if any */
	struct pci_devinst *vs_pi;	/* PCI device instance */
	uint32_t vs_negotiated_caps;	/* negotiated capabilities */
	struct vqueue_info *vs_queues;	/* one per vc_nvq */
	int	vs_curq;		/* current queue */
	uint8_t	vs_status;		/* value from last status write */
	uint8_t	vs_isr;			/* ISR flags, if not MSI-X */
	uint16_t vs_msix_cfg_idx;	/* MSI-X vector for config event */
};
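
/*
 * A hypothetical derived softc, illustrating the "virtio_softc at the
 * front" convention described above (the "vtfoo" names are invented
 * for this example):
 *
 *	struct pci_vtfoo_softc {
 *		struct virtio_softc vsc_vs;	// must come first
 *		pthread_mutex_t vsc_mtx;
 *		struct vqueue_info vsc_vq;
 *	};
 *
 * A (struct virtio_softc *) into such an object is then also a valid
 * (struct pci_vtfoo_softc *).
 */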

#define	VS_LOCK(vs)							\
do {									\
	if (vs->vs_mtx)							\
		pthread_mutex_lock(vs->vs_mtx);				\
} while (0)

#define	VS_UNLOCK(vs)							\
do {									\
	if (vs->vs_mtx)							\
		pthread_mutex_unlock(vs->vs_mtx);			\
} while (0)

struct virtio_consts {
	const char *vc_name;		/* name of driver (for diagnostics) */
	int	vc_nvq;			/* number of virtual queues */
	size_t	vc_cfgsize;		/* size of dev-specific config regs */
	void	(*vc_reset)(void *);	/* called on virtual device reset */
	void	(*vc_qnotify)(void *, struct vqueue_info *);
					/* called on QNOTIFY if no VQ notify */
	int	(*vc_cfgread)(void *, int, int, uint32_t *);
					/* called to read config regs */
	int	(*vc_cfgwrite)(void *, int, int, uint32_t);
					/* called to write config regs */
	void	(*vc_apply_features)(void *, uint64_t);
					/* called to apply negotiated features */
	uint64_t vc_hv_caps;		/* hypervisor-provided capabilities */
};
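
/*
 * Hedged example of how a device driver might populate its constants;
 * the "vtfoo" identifiers are invented for illustration:
 *
 *	static struct virtio_consts vtfoo_vi_consts = {
 *		.vc_name = "vtfoo",
 *		.vc_nvq = 1,
 *		.vc_cfgsize = sizeof (struct vtfoo_config),
 *		.vc_reset = pci_vtfoo_reset,
 *		.vc_qnotify = pci_vtfoo_notify,
 *		.vc_cfgread = pci_vtfoo_cfgread,
 *		.vc_cfgwrite = pci_vtfoo_cfgwrite,
 *		.vc_hv_caps = 0,
 *	};
 */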

/*
 * Data structure allocated (statically) per virtual queue.
 *
 * Drivers may change vq_qsize after a reset.  When the guest OS
 * requests a device reset, the hypervisor first calls
 * vs->vs_vc->vc_reset(); then the data structure below is
 * reinitialized (for each virtqueue: vs->vs_vc->vc_nvq).
 *
 * The remaining fields should only be fussed-with by the generic
 * code.
 *
 * Note: the addresses of vq_desc, vq_avail, and vq_used are all
 * computable from each other, but it's a lot simpler if we just
 * keep a pointer to each one.  The event indices are similarly
 * (but more easily) computable, and this time we'll compute them:
 * they're just XX_ring[N].
 */
#define	VQ_ALLOC	0x01	/* set once we have a pfn */
#define	VQ_BROKED	0x02	/* ??? */
struct vqueue_info {
	uint16_t vq_qsize;	/* size of this queue (a power of 2) */
	void	(*vq_notify)(void *, struct vqueue_info *);
				/* called instead of vc_notify, if not NULL */

	struct virtio_softc *vq_vs;	/* backpointer to softc */
	uint16_t vq_num;	/* we're the num'th queue in the softc */

	uint16_t vq_flags;	/* flags (see above) */
	uint16_t vq_last_avail;	/* a recent value of vq_avail->idx */
	uint16_t vq_next_used;	/* index of the next used slot to be filled */
	uint16_t vq_save_used;	/* saved vq_used->idx; see vq_endchains */
	uint16_t vq_msix_idx;	/* MSI-X index, or VIRTIO_MSI_NO_VECTOR */

	uint32_t vq_pfn;	/* PFN of virt queue (not shifted!) */

	struct vring_desc *vq_desc;	/* descriptor array */
	struct vring_avail *vq_avail;	/* the "avail" ring */
	struct vring_used *vq_used;	/* the "used" ring */
};
/* as noted above, these are sort of backwards, name-wise */
#define	VQ_AVAIL_EVENT_IDX(vq) \
	(*(uint16_t *)&(vq)->vq_used->ring[(vq)->vq_qsize])
#define	VQ_USED_EVENT_IDX(vq) \
	((vq)->vq_avail->ring[(vq)->vq_qsize])

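/*
 * With VIRTIO_EVENT_IDX negotiated, the generic code need only
 * interrupt once the used index moves past VQ_USED_EVENT_IDX(vq).
 * A sketch of the standard wrap-safe check from the virtio spec (see
 * vq_endchains() for the real logic), where old_idx is the used index
 * before publishing and new_idx the index after:
 *
 *	uint16_t event = VQ_USED_EVENT_IDX(vq);
 *	if ((uint16_t)(new_idx - event - 1) <
 *	    (uint16_t)(new_idx - old_idx))
 *		vq_interrupt(vq->vq_vs, vq);
 */
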
/*
 * Is this ring ready for I/O?
 */
static inline int
vq_ring_ready(struct vqueue_info *vq)
{

	return (vq->vq_flags & VQ_ALLOC);
}

/*
 * Are there "available" descriptors?  (This does not count
 * how many, just returns True if there are some.)
 */
static inline int
vq_has_descs(struct vqueue_info *vq)
{

	return (vq_ring_ready(vq) && vq->vq_last_avail !=
	    vq->vq_avail->idx);
}

/*
 * Deliver an interrupt to the guest for a specific MSI-X queue or
 * event.
 */
static inline void
vi_interrupt(struct virtio_softc *vs, uint8_t isr, uint16_t msix_idx)
{

	if (pci_msix_enabled(vs->vs_pi))
		pci_generate_msix(vs->vs_pi, msix_idx);
	else {
#ifndef __FreeBSD__
		boolean_t unlock = B_FALSE;

		if (vs->vs_mtx && !pthread_mutex_isowned_np(vs->vs_mtx)) {
			unlock = B_TRUE;
			pthread_mutex_lock(vs->vs_mtx);
		}
#else
		VS_LOCK(vs);
#endif
		vs->vs_isr |= isr;
		pci_generate_msi(vs->vs_pi, 0);
		pci_lintr_assert(vs->vs_pi);
#ifndef __FreeBSD__
		if (unlock)
			pthread_mutex_unlock(vs->vs_mtx);
#else
		VS_UNLOCK(vs);
#endif
	}
}

/*
 * Deliver an interrupt to the guest on the given virtual queue (if
 * possible, or via a generic MSI/INTx interrupt if not using MSI-X).
 */
static inline void
vq_interrupt(struct virtio_softc *vs, struct vqueue_info *vq)
{

	vi_interrupt(vs, VIRTIO_PCI_ISR_INTR, vq->vq_msix_idx);
}

static inline void
vq_kick_enable(struct vqueue_info *vq)
{

	vq->vq_used->flags &= ~VRING_USED_F_NO_NOTIFY;
	/*
	 * Full memory barrier to make sure the store to vq_used->flags
	 * happens before the load from vq_avail->idx, which results from a
	 * subsequent call to vq_has_descs().
	 */
	atomic_thread_fence_seq_cst();
}

static inline void
vq_kick_disable(struct vqueue_info *vq)
{

	vq->vq_used->flags |= VRING_USED_F_NO_NOTIFY;
}

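/*
 * A sketch of the usual "suppress notifications while processing"
 * pattern built from the two helpers above; process_one() is a
 * hypothetical per-chain handler:
 *
 *	vq_kick_disable(vq);
 *	for (;;) {
 *		while (vq_has_descs(vq))
 *			process_one(vq);
 *		vq_kick_enable(vq);	// barrier, then re-check
 *		if (!vq_has_descs(vq))
 *			break;
 *		vq_kick_disable(vq);	// more arrived; keep going
 *	}
 */
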
struct iovec;

/*
 * Request description returned by vq_getchain.
 *
 * Writable iovecs start at iov[req.readable].
 */
struct vi_req {
	int readable;		/* num of readable iovecs */
	int writable;		/* num of writable iovecs */
	unsigned int idx;	/* ring index */
};

void	vi_softc_linkup(struct virtio_softc *vs, struct virtio_consts *vc,
			void *dev_softc, struct pci_devinst *pi,
			struct vqueue_info *queues);
int	vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix);
void	vi_reset_dev(struct virtio_softc *);
void	vi_set_io_bar(struct virtio_softc *, int);

int	vq_getchain(struct vqueue_info *vq, struct iovec *iov, int niov,
	    struct vi_req *reqp);
void	vq_retchains(struct vqueue_info *vq, uint16_t n_chains);
void	vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx,
			    uint32_t iolen);
void	vq_relchain_publish(struct vqueue_info *vq);
void	vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen);
void	vq_endchains(struct vqueue_info *vq, int used_all_avail);

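/*
 * Sketch of a typical qnotify handler built on the routines above
 * (error handling omitted; "bytes_written" stands in for whatever the
 * device actually produced):
 *
 *	struct iovec iov[8];
 *	struct vi_req req;
 *
 *	while (vq_has_descs(vq)) {
 *		int n = vq_getchain(vq, iov, 8, &req);	// n = iovecs filled
 *		// consume req.readable iovecs, fill req.writable iovecs
 *		vq_relchain(vq, req.idx, bytes_written);
 *	}
 *	vq_endchains(vq, 1);	// interrupt if the guest is waiting
 */
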
uint64_t vi_pci_read(struct vmctx *ctx, struct pci_devinst *pi,
		     int baridx, uint64_t offset, int size);
void	vi_pci_write(struct vmctx *ctx, struct pci_devinst *pi,
		     int baridx, uint64_t offset, int size, uint64_t value);

#ifndef __FreeBSD__
void	vi_vq_init(struct virtio_softc *, uint32_t);
#endif

#endif	/* _BHYVE_VIRTIO_H_ */