xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/xhci/xhci_endpoint.c (revision 5f82aa32fbc5dc2c59bca6ff315f44a4c4c9ea86)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2016 Joyent, Inc.
14  */
15 
16 /*
17  * xHCI Endpoint Initialization and Management
18  *
19  * Please see the big theory statement in xhci.c for more information.
20  */
21 
22 #include <sys/usb/hcd/xhci/xhci.h>
23 #include <sys/sdt.h>
24 
25 boolean_t
26 xhci_endpoint_is_periodic_in(xhci_endpoint_t *xep)
27 {
28 	usba_pipe_handle_data_t *ph;
29 
30 	ASSERT(xep != NULL);
31 	ph = xep->xep_pipe;
32 	ASSERT(ph != NULL);
33 
34 	return ((xep->xep_type == USB_EP_ATTR_INTR ||
35 	    xep->xep_type == USB_EP_ATTR_ISOCH) &&
36 	    (ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN);
37 }
38 
39 /*
40  * Endpoints are a bit weirdly numbered. Endpoint zero is the default control
41  * endpoint, so the direction doesn't matter. For all the others, they're
42  * arranged as ep 1 out, ep 1 in, ep 2 out, ep 2 in. This is based on the layout
43  * of the Device Context Structure in xHCI 1.1 / 6.2.1. Therefore to go from the
44  * endpoint and direction, we know that endpoint n starts at 2n - 1, e.g.
45  * endpoint 1 starts at entry 1, endpoint 2 at entry 3, etc. Within each pair,
46  * the OUT direction comes first, followed by the IN direction; so for
47  * endpoint n, the OUT entry is 2n - 1 and the IN entry is 2n.
48  */
49 uint_t
50 xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *ph)
51 {
52 	int ep;
53 
54 	ep = ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK;
55 	if (ep == 0)
56 		return (ep);
57 	ep = ep * 2 - 1;
58 	if ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN)
59 		ep++;
60 
61 	VERIFY(ep < XHCI_NUM_ENDPOINTS);
62 	return (ep);
63 }
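
/*
 * To illustrate the mapping that the function above implements (the addresses
 * here are just examples):
 *
 *	bEndpointAddress	direction	endpoint ID
 *	0x00 (endpoint 0)	either		0
 *	0x01 (endpoint 1)	OUT		1
 *	0x81 (endpoint 1)	IN		2
 *	0x02 (endpoint 2)	OUT		3
 *	0x82 (endpoint 2)	IN		4
 */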
64 
65 /*
66  * The assumption is that someone calling this owns this endpoint / device and
67  * that it's in a state where it's safe to zero out that information.
68  */
69 void
70 xhci_endpoint_fini(xhci_device_t *xd, int endpoint)
71 {
72 	xhci_endpoint_t *xep = xd->xd_endpoints[endpoint];
73 
74 	VERIFY(xep != NULL);
75 	xd->xd_endpoints[endpoint] = NULL;
76 
77 	xhci_ring_free(&xep->xep_ring);
78 	cv_destroy(&xep->xep_state_cv);
79 	list_destroy(&xep->xep_transfers);
80 	kmem_free(xep, sizeof (xhci_endpoint_t));
81 }
82 
83 /*
84  * Set up the default control endpoint input context. This needs to be done
85  * before we address the device. Note, we separate out the default endpoint from
86  * others, as we must set this up before we have a pipe handle.
87  */
88 int
89 xhci_endpoint_setup_default_context(xhci_t *xhcip, xhci_device_t *xd,
90     xhci_endpoint_t *xep)
91 {
92 	uint_t mps;
93 	xhci_endpoint_context_t *ectx;
94 	uint64_t deq;
95 
96 	ectx = xd->xd_endin[xep->xep_num];
97 	VERIFY(ectx != NULL);
98 
99 	/*
100 	 * We may or may not have a device descriptor. This should match the
101 	 * same initial sizes that are done in hubd_create_child().
102 	 *
103 	 * Note, since we don't necessarily have an endpoint descriptor yet to
104	 * base this on, we instead use the device's defaults if available. This
105 	 * is different from normal endpoints for which there's always a
106 	 * specific descriptor.
107 	 */
108 	switch (xd->xd_usbdev->usb_port_status) {
109 	case USBA_LOW_SPEED_DEV:
110 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
111 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
112 		} else {
113 			mps = 8;
114 		}
115 		break;
116 	case USBA_FULL_SPEED_DEV:
117 	case USBA_HIGH_SPEED_DEV:
118 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
119 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
120 		} else {
121 			mps = 64;
122 		}
123 		break;
124 	case USBA_SUPER_SPEED_DEV:
125 	default:
126 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
127 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
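			/*
			 * For USB >= 3.0 devices, bMaxPacketSize0 is an
			 * exponent rather than a byte count; the standard
			 * value of 9 yields 2^9 = 512 bytes.
			 */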
128 			mps = 1 << mps;
129 		} else {
130 			mps = 512;
131 		}
132 		break;
133 	}
134 
135 	bzero(ectx, sizeof (xhci_endpoint_context_t));
136 	ectx->xec_info = LE_32(0);
137 	ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(3) |
138 	    XHCI_EPCTX_SET_EPTYPE(XHCI_EPCTX_TYPE_CTRL) |
139 	    XHCI_EPCTX_SET_MAXB(0) | XHCI_EPCTX_SET_MPS(mps));
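	/*
	 * The TR dequeue pointer is the physical address of the ring's current
	 * tail TRB; bit zero of the value carries the ring's current cycle
	 * state.
	 */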
140 	deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
141 	    xep->xep_ring.xr_tail;
142 	ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);
143 	ectx->xec_txinfo = LE_32(XHCI_EPCTX_MAX_ESIT_PAYLOAD(0) |
144 	    XHCI_EPCTX_AVG_TRB_LEN(XHCI_CONTEXT_DEF_CTRL_ATL));
145 
146 	XHCI_DMA_SYNC(xd->xd_ictx, DDI_DMA_SYNC_FORDEV);
147 	if (xhci_check_dma_handle(xhcip, &xd->xd_ictx) != DDI_FM_OK) {
148 		xhci_error(xhcip, "failed to initialize default device input "
149 		    "context on slot %d and port %d for endpoint %u:  "
150 		    "encountered fatal FM error synchronizing input context "
151 		    "DMA memory", xd->xd_slot, xd->xd_port, xep->xep_num);
152 		xhci_fm_runtime_reset(xhcip);
153 		return (EIO);
154 	}
155 
156 	return (0);
157 }
158 
159 /*
160  * Determine if we need to update the maximum packet size of the default
161  * control endpoint. This may happen because we start with the default size
162  * before we have a descriptor and then it may change. For example,
163  * full-speed devices may have either an 8 or 64 byte maximum packet size.
164  */
165 int
166 xhci_endpoint_update_default(xhci_t *xhcip, xhci_device_t *xd,
167     xhci_endpoint_t *xep)
168 {
169 	int mps, desc, info, ret;
170 	ASSERT(xd->xd_usbdev != NULL);
171 
172 	mps = XHCI_EPCTX_GET_MPS(xd->xd_endout[xep->xep_num]->xec_info2);
173 	desc = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
174 	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
175 		desc = 1 << desc;
176 	}
177 
178 	if (mps == desc)
179 		return (USB_SUCCESS);
180 
181 	/*
182 	 * Update only the context for the default control endpoint.
183 	 */
184 	mutex_enter(&xd->xd_imtx);
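	/*
	 * Read the current value from the output context, clear the stale MPS
	 * bits, and write the descriptor's value into the input context.
	 */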
185 	info = LE_32(xd->xd_endout[xep->xep_num]->xec_info2);
186 	info &= ~XHCI_EPCTX_SET_MPS(mps);
187 	info |= XHCI_EPCTX_SET_MPS(desc);
188 	xd->xd_endin[xep->xep_num]->xec_info2 = LE_32(info);
189 	xd->xd_input->xic_drop_flags = LE_32(0);
190 	xd->xd_input->xic_add_flags = LE_32(XHCI_INCTX_MASK_DCI(1));
191 
192 	ret = xhci_command_evaluate_context(xhcip, xd);
193 	mutex_exit(&xd->xd_imtx);
194 
195 	return (ret);
196 }
197 
198 static uint_t
199 xhci_endpoint_epdesc_to_type(usb_ep_descr_t *ep)
200 {
201 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
202 	boolean_t in = (ep->bEndpointAddress & USB_EP_DIR_MASK) ==
203 	    USB_EP_DIR_IN;
204 
205 	switch (type) {
206 	case USB_EP_ATTR_CONTROL:
207 		return (XHCI_EPCTX_TYPE_CTRL);
208 	case USB_EP_ATTR_ISOCH:
209 		if (in == B_TRUE)
210 			return (XHCI_EPCTX_TYPE_ISOCH_IN);
211 		return (XHCI_EPCTX_TYPE_ISOCH_OUT);
212 	case USB_EP_ATTR_BULK:
213 		if (in == B_TRUE)
214 			return (XHCI_EPCTX_TYPE_BULK_IN);
215 		return (XHCI_EPCTX_TYPE_BULK_OUT);
216 	case USB_EP_ATTR_INTR:
217 		if (in == B_TRUE)
218 			return (XHCI_EPCTX_TYPE_INTR_IN);
219 		return (XHCI_EPCTX_TYPE_INTR_OUT);
220 	default:
221 		panic("bad USB attribute type: %d", type);
222 	}
223 
224 	/* LINTED: E_FUNC_NO_RET_VAL */
225 }
226 
227 static uint_t
228 xhci_endpoint_determine_burst(xhci_device_t *xd, xhci_endpoint_t *xep)
229 {
230 	switch (xd->xd_usbdev->usb_port_status) {
231 	case USBA_LOW_SPEED_DEV:
232 	case USBA_FULL_SPEED_DEV:
233 		/*
234 		 * Per xHCI 1.1 / 6.2.3.4, burst is always zero for these
235 		 * devices.
236 		 */
237 		return (0);
238 	case USBA_HIGH_SPEED_DEV:
239 		if (xep->xep_type == USB_EP_ATTR_CONTROL ||
240 		    xep->xep_type == USB_EP_ATTR_BULK)
241 			return (0);
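		/*
		 * For high-speed periodic endpoints, bits 12:11 of
		 * wMaxPacketSize encode the number of additional transactions
		 * per microframe, which is exactly the burst value we want.
		 */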
242 		return ((xep->xep_pipe->p_xep.uex_ep.wMaxPacketSize &
243 		    XHCI_CONTEXT_BURST_MASK) >> XHCI_CONTEXT_BURST_SHIFT);
244 	default:
245 		/*
246 		 * For these USB >= 3.0, this comes from the companion
247		 * For USB >= 3.0 devices, this comes from the companion
248 		 */
249 		ASSERT(xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP);
250 		return (xep->xep_pipe->p_xep.uex_ep_ss.bMaxBurst);
251 	}
252 }
253 
254 /*
255  * Convert a linear mapping of values that are in the range of 1-255 into a
256  * 2^x value. Because we're supposed to round down for these calculations (see
257  * the note in xHCI 1.1 / 6.2.3.6) we can do this simply with a fls() and
258  * subtracting one.
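 *
 * For example (illustrative): an input value of 33 gives ddi_fls(33) - 1 = 5,
 * i.e. an effective interval of 2^5 = 32, rounded down from 33.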
259  */
260 static uint_t
261 xhci_endpoint_linear_interval(usb_ep_descr_t *ep)
262 {
263 	int exp;
264 	int ival = ep->bInterval;
265 	if (ival < 1)
266 		ival = 1;
267 	if (ival > 255)
268 		ival = 255;
269 	exp = ddi_fls(ival) - 1;
270 	ASSERT(exp >= 0 && exp <= 7);
271 	return (exp);
272 }
273 
274 /*
275  * Convert the set of values that use a 2^(x-1) value for interval into a 2^x
276  * range. Note the valid input range is 1-16, so we clamp values based on this.
277  * See xHCI 1.1 / 6.2.3.6 for more information.
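 *
 * For example (illustrative): bInterval = 4 means a period of 2^(4-1) = 8
 * service intervals, so we hand the controller 4 - 1 = 3, i.e. 2^3 = 8.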
278  */
279 static uint_t
280 xhci_endpoint_exponential_interval(usb_ep_descr_t *ep)
281 {
282 	int ival;
283 
284 	ival = ep->bInterval;
285 	if (ival < 1)
286 		ival = 1;
287 	if (ival > 16)
288 		ival = 16;
289 	ival--;
290 	ASSERT(ival >= 0 && ival <= 15);
291 	return (ival);
292 }
293 
294 
295 /*
296  * Determining the interval is unfortunately somewhat complicated as there are
297  * many different forms that things can take. This is all summarized in a
298  * somewhat helpful table, number 65, in xHCI 1.1 / 6.2.3.6. Here are,
299  * basically, the six different cases we have to consider:
300  *
301  * Case 1: Non-High Speed Bulk and Control Endpoints
302  * 	Always return 0.
303  *
304  * Case 2: Super Speed and High Speed Isoch and Intr endpoints
305  * 	Convert from a 2^(x-1) range to a 2^x range.
306  *
307  * Case 3: Full Speed Isochronous Endpoints
308  * 	As case 2, but add 3 as its values are in frames and we need to convert
309  * 	to microframes. Adding three to the result is the same as multiplying
310  * 	the initial value by 8.
311  *
312  * Case 4: Full speed and Low Speed Interrupt Endpoints
313  * 	These have a 1-255 ms range that we need to convert to a 2^x * 125 us
314  * 	range. We use the linear conversion and then add 3 to account for the
315  * 	multiplying by 8 conversion from frames to microframes.
316  *
317  * Case 5: High Speed Interrupt and Bulk Output
318  * 	These are a bit of a weird case. The spec and other implementations make
319  * 	it seem that it's similar to case 4, but without the fixed addition as
320  * 	it's interpreted differently due to NAKs.
321  *
322  * Case 6: Low Speed Isochronous Endpoints
323  * 	These are not actually defined; however, like other implementations we
324  * 	treat them like case 4.
325  */
326 static uint_t
327 xhci_endpoint_interval(xhci_device_t *xd, usb_ep_descr_t *ep)
328 {
329 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
330 	int speed = xd->xd_usbdev->usb_port_status;
331 
332 	/*
333 	 * Handle Cases 1 and 5 first.
334 	 */
335 	if (type == USB_EP_ATTR_CONTROL || type == USB_EP_ATTR_BULK) {
336 		if (speed != USBA_HIGH_SPEED_DEV)
337 			return (0);
338 		return (xhci_endpoint_linear_interval(ep));
339 	}
340 
341 	/*
342 	 * Handle Isoch and Intr cases next.
343 	 */
344 	switch (speed) {
345 	case USBA_LOW_SPEED_DEV:
346 		/*
347 		 * Interrupt endpoints at low speed are the same as full speed,
348 		 * hence the fall through.
349 		 */
350 		if (type == USB_EP_ATTR_ISOCH) {
351 			return (xhci_endpoint_exponential_interval(ep) + 3);
352 		}
353 		/* FALLTHROUGH */
354 	case USBA_FULL_SPEED_DEV:
355 		return (xhci_endpoint_linear_interval(ep) + 3);
356 	case USBA_HIGH_SPEED_DEV:
357 	case USBA_SUPER_SPEED_DEV:
358 	default:
359 		/*
360 		 * Case 2. Treat any newer and faster speeds as Super Speed by
361 		 * default as USB 3.1 is effectively treated the same here.
362 		 */
363 		return (xhci_endpoint_exponential_interval(ep));
364 	}
365 }
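
/*
 * A worked example of case 4 above (illustrative values): a full-speed
 * interrupt endpoint with bInterval = 10 (10 ms) gives ddi_fls(10) - 1 = 3,
 * plus 3 for the frame-to-microframe conversion, so we program an interval of
 * 6, i.e. 2^6 * 125 us = 8 ms, rounded down from the requested 10 ms.
 */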
366 
367 /*
368  * The way to calculate the Maximum ESIT is described in xHCI 1.1 / 4.14.2.
369  * First off, this only applies to Interrupt and Isochronous descriptors. For
370  * Super Speed and newer things, it comes out of a descriptor. Otherwise we
371  * calculate it by doing 'Max Packet Size' * ('Max Burst' + 1).
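 *
 * For example (illustrative): a high-speed interrupt endpoint with a 1024-byte
 * max packet size and a burst of 2 has a Max ESIT Payload of
 * 1024 * (2 + 1) = 3072 bytes.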
372  */
373 static uint_t
374 xhci_endpoint_max_esit(xhci_device_t *xd, xhci_endpoint_t *xep, uint_t mps,
375     uint_t burst)
376 {
377 	if (xep->xep_type == USB_EP_ATTR_CONTROL ||
378 	    xep->xep_type == USB_EP_ATTR_BULK) {
379 		return (0);
380 	}
381 
382 	/*
383 	 * Note that this will need to be updated for SuperSpeedPlus ISOC
384 	 * devices to pull from the secondary companion descriptor they use.
385 	 */
386 	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
387 		usb_ep_xdescr_t *ep_xdesc = &xep->xep_pipe->p_xep;
388 		ASSERT(xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP);
389 		return (ep_xdesc->uex_ep_ss.wBytesPerInterval);
390 	}
391 
392 	return (mps * (burst + 1));
393 }
394 
395 /*
396  * We've been asked to calculate and tell the xHCI controller an average TRB
397  * data length. This is talked about in an implementation note in xHCI 1.1 /
398  * 4.14.1.1. So, the reality is that it's hard to actually calculate this, as
399  * we're supposed to take into account all of the TRBs that we use on that ring.
400  *
401  * Surveying other xHCI drivers, they all agree on using the default of 8 for
402  * control endpoints; however, from there things get a little more fluid. For
403  * interrupt and isochronous endpoints, many drivers use the minimum of the max
404  * packet size and the controller's pagesize. For bulk endpoints, some punt and
405  * don't set anything, and others try to set it to the pagesize. The xHCI
406  * implementation note suggests a 3k size here initially. For now, we'll just
407  * guess for bulk endpoints, using our page size as the determining factor, and
408  * use the BSD style for others. Note that Linux only sets this value for
409  * control endpoints.
410  */
411 static uint_t
412 xhci_endpoint_avg_trb(xhci_t *xhcip, usb_ep_descr_t *ep, int mps)
413 {
414 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
415 
416 	switch (type) {
417 	case USB_EP_ATTR_ISOCH:
418 	case USB_EP_ATTR_INTR:
419 		return (MIN(xhcip->xhci_caps.xcap_pagesize, mps));
420 	case USB_EP_ATTR_CONTROL:
421 		return (XHCI_CONTEXT_DEF_CTRL_ATL);
422 	case USB_EP_ATTR_BULK:
423 		return (xhcip->xhci_caps.xcap_pagesize);
424 	default:
425 		panic("bad USB endpoint type: %d", type);
426 	}
427 
428 	/* LINTED: E_FUNC_NO_RET_VAL */
429 }
430 
431 int
432 xhci_endpoint_setup_context(xhci_t *xhcip, xhci_device_t *xd,
433     xhci_endpoint_t *xep)
434 {
435 	uint_t eptype, burst, ival, max_esit, avgtrb, mps, mult, cerr;
436 	xhci_endpoint_context_t *ectx;
437 	uint64_t deq;
438 
439 	/*
440 	 * For a USB >=3.0 device we should always have its companion descriptor
441 	 * provided for us by USBA. If it's not here, complain loudly and fail.
442 	 */
443 	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV &&
444 	    (xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP) == 0) {
445 		const char *prod, *mfg;
446 
447 		prod = xd->xd_usbdev->usb_product_str;
448 		if (prod == NULL)
449 			prod = "Unknown Device";
450 		mfg = xd->xd_usbdev->usb_mfg_str;
451 		if (mfg == NULL)
452 			mfg = "Unknown Manufacturer";
453 
454 		xhci_log(xhcip, "Encountered USB >=3.0 device without endpoint "
455 		    "companion descriptor. Ensure driver %s is properly using "
456 		    "usb_pipe_xopen() for device %s %s",
457 		    ddi_driver_name(xd->xd_usbdev->usb_dip), prod, mfg);
458 		return (EINVAL);
459 	}
460 
461 	ectx = xd->xd_endin[xep->xep_num];
462 	VERIFY(ectx != NULL);
463 	VERIFY(xd->xd_usbdev->usb_dev_descr != NULL);
464 	VERIFY(xep->xep_pipe != NULL);
465 
466 	mps = xep->xep_pipe->p_ep.wMaxPacketSize & XHCI_CONTEXT_MPS_MASK;
467 	mult = XHCI_CONTEXT_DEF_MULT;
468 	cerr = XHCI_CONTEXT_DEF_CERR;
469 
470 	switch (xep->xep_type) {
471 	case USB_EP_ATTR_ISOCH:
472 		/*
473 		 * When we have support for USB 3.1 SuperSpeedPlus devices,
474 		 * we'll need to make sure that we also check for its secondary
475 		 * endpoint companion descriptor here.
476 		 */
477 		/*
478 		 * Super Speed devices nominally have these xHCI super speed
479 		 * companion descriptors. We know that we're not properly
480 		 * grabbing them right now, so until we do, we should basically
481 		 * error about it.
482 		 */
483 		if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
484 			ASSERT(xep->xep_pipe->p_xep.uex_flags &
485 			    USB_EP_XFLAGS_SS_COMP);
486 			mult = xep->xep_pipe->p_xep.uex_ep_ss.bmAttributes &
487 			    USB_EP_SS_COMP_ISOC_MULT_MASK;
488 		}
489 
490 		mps &= XHCI_CONTEXT_MPS_MASK;
491 		cerr = XHCI_CONTEXT_ISOCH_CERR;
492 		break;
493 	default:
494 		/*
495 		 * No explicit changes needed for CONTROL, INTR, and BULK
496 		 * endpoints. They've been handled already and don't have any
497 		 * differences.
498 		 */
499 		break;
500 	}
501 
502 	eptype = xhci_endpoint_epdesc_to_type(&xep->xep_pipe->p_xep.uex_ep);
503 	burst = xhci_endpoint_determine_burst(xd, xep);
504 	ival = xhci_endpoint_interval(xd, &xep->xep_pipe->p_xep.uex_ep);
505 	max_esit = xhci_endpoint_max_esit(xd, xep, mps, burst);
506 	avgtrb = xhci_endpoint_avg_trb(xhcip, &xep->xep_pipe->p_xep.uex_ep,
507 	    mps);
508 
509 	/*
510 	 * The multi field may be reserved as zero if the LEC feature flag is
511 	 * set. See the description of mult in xHCI 1.1 / 6.2.3.
512 	 */
513 	if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC)
514 		mult = 0;
515 
516 	bzero(ectx, sizeof (xhci_endpoint_context_t));
517 
518 	ectx->xec_info = LE_32(XHCI_EPCTX_SET_MULT(mult) |
519 	    XHCI_EPCTX_SET_IVAL(ival));
520 	if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC)
521 		ectx->xec_info |= LE_32(XHCI_EPCTX_SET_MAX_ESIT_HI(max_esit));
522 
523 	ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(cerr) |
524 	    XHCI_EPCTX_SET_EPTYPE(eptype) | XHCI_EPCTX_SET_MAXB(burst) |
525 	    XHCI_EPCTX_SET_MPS(mps));
526 
527 	deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
528 	    xep->xep_ring.xr_tail;
529 	ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);
530 
531 	ectx->xec_txinfo = LE_32(XHCI_EPCTX_MAX_ESIT_PAYLOAD(max_esit) |
532 	    XHCI_EPCTX_AVG_TRB_LEN(avgtrb));
533 
534 	XHCI_DMA_SYNC(xd->xd_ictx, DDI_DMA_SYNC_FORDEV);
535 	if (xhci_check_dma_handle(xhcip, &xd->xd_ictx) != DDI_FM_OK) {
536 		xhci_error(xhcip, "failed to initialize device input "
537 		    "context on slot %d and port %d for endpoint %u:  "
538 		    "encountered fatal FM error synchronizing input context "
539 		    "DMA memory", xd->xd_slot, xd->xd_port, xep->xep_num);
540 		xhci_fm_runtime_reset(xhcip);
541 		return (EIO);
542 	}
543 
544 	return (0);
545 }
546 
547 /*
548  * Initialize the endpoint and its input context for a given device. This is
549  * called from two different contexts:
550  *
551  *   1. Initializing a device
552  *   2. Opening a USB pipe
553  *
554  * In the second case, we need to worry about locking around the device. We
555  * don't need to worry about the locking in the first case because the USBA
556  * doesn't know about it yet.
557  */
558 int
559 xhci_endpoint_init(xhci_t *xhcip, xhci_device_t *xd,
560     usba_pipe_handle_data_t *ph)
561 {
562 	int ret;
563 	uint_t epid;
564 	xhci_endpoint_t *xep;
565 
566 	if (ph == NULL) {
567 		epid = XHCI_DEFAULT_ENDPOINT;
568 	} else {
569 		ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
570 		epid = xhci_endpoint_pipe_to_epid(ph);
571 	}
572 	VERIFY(xd->xd_endpoints[epid] == NULL);
573 
574 	xep = kmem_zalloc(sizeof (xhci_endpoint_t), KM_SLEEP);
575 	list_create(&xep->xep_transfers, sizeof (xhci_transfer_t),
576 	    offsetof(xhci_transfer_t, xt_link));
577 	cv_init(&xep->xep_state_cv, NULL, CV_DRIVER, NULL);
578 	xep->xep_xd = xd;
579 	xep->xep_xhci = xhcip;
580 	xep->xep_num = epid;
581 	if (ph == NULL) {
582 		xep->xep_pipe = NULL;
583 		xep->xep_type = USB_EP_ATTR_CONTROL;
584 	} else {
585 		xep->xep_pipe = ph;
586 		xep->xep_type = ph->p_ep.bmAttributes & USB_EP_ATTR_MASK;
587 	}
588 
589 	if ((ret = xhci_ring_alloc(xhcip, &xep->xep_ring)) != 0) {
590 		cv_destroy(&xep->xep_state_cv);
591 		list_destroy(&xep->xep_transfers);
592 		kmem_free(xep, sizeof (xhci_endpoint_t));
593 		return (ret);
594 	}
595 
596 	if ((ret = xhci_ring_reset(xhcip, &xep->xep_ring)) != 0) {
597 		xhci_ring_free(&xep->xep_ring);
598 		cv_destroy(&xep->xep_state_cv);
599 		list_destroy(&xep->xep_transfers);
600 		kmem_free(xep, sizeof (xhci_endpoint_t));
601 		return (ret);
602 	}
603 
604 	xd->xd_endpoints[epid] = xep;
605 	if (ph == NULL) {
606 		ret = xhci_endpoint_setup_default_context(xhcip, xd, xep);
607 	} else {
608 		ret = xhci_endpoint_setup_context(xhcip, xd, xep);
609 	}
610 	if (ret != 0) {
611 		xhci_endpoint_fini(xd, xep->xep_num);
612 		return (ret);
613 	}
614 
615 	return (0);
616 }
617 
618 /*
619  * Attempt to quiesce an endpoint. Depending on the state of the endpoint, we
620  * may need to simply stop it. Alternatively, we may need to explicitly reset
621  * the endpoint. Once done, this endpoint should be stopped and can be
622  * manipulated.
623  */
624 int
625 xhci_endpoint_quiesce(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
626 {
627 	int ret = USB_SUCCESS;
628 	xhci_endpoint_context_t *epctx = xd->xd_endout[xep->xep_num];
629 
630 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
631 	ASSERT(xep->xep_state & XHCI_ENDPOINT_QUIESCE);
632 
633 	/*
634 	 * First attempt to stop the endpoint, unless it's halted. We don't
635 	 * really care what state it is in. Note that because other activity
636 	 * could be going on, the state may change on us; however, if it's
637 	 * running, it will always transition to a stopped state and none of the
638 	 * other valid states will allow transitions without us taking an active
639 	 * action.
640 	 */
641 	if (!(xep->xep_state & XHCI_ENDPOINT_HALTED)) {
642 		mutex_exit(&xhcip->xhci_lock);
643 		ret = xhci_command_stop_endpoint(xhcip, xd, xep);
644 		mutex_enter(&xhcip->xhci_lock);
645 
646 		if (ret == USB_INVALID_CONTEXT) {
647 			XHCI_DMA_SYNC(xd->xd_octx, DDI_DMA_SYNC_FORKERNEL);
648 		}
649 	}
650 
651 	/*
652 	 * Now, if we had the HALTED flag set or we failed to stop it due to a
653	 * context error and we're in the HALTED state now, reset the endpoint.
654 	 */
655 	if ((xep->xep_state & XHCI_ENDPOINT_HALTED) ||
656 	    (ret == USB_INVALID_CONTEXT &&
657 	    XHCI_EPCTX_STATE(LE_32(epctx->xec_info)) == XHCI_EP_HALTED)) {
658 		mutex_exit(&xhcip->xhci_lock);
659 		ret = xhci_command_reset_endpoint(xhcip, xd, xep);
660 		mutex_enter(&xhcip->xhci_lock);
661 	}
662 
663 	/*
664 	 * Ideally, one of the two commands should have worked; however, we
665 	 * could have had a context error due to being in the wrong state.
666 	 * Verify that we're either in the ERROR or STOPPED state and treat both
667 	 * as success. All callers are assumed to be doing this so they can
668 	 * change the dequeue pointer.
669 	 */
670 	if (ret != USB_SUCCESS && ret != USB_INVALID_CONTEXT) {
671 		return (ret);
672 	}
673 
674 	if (ret == USB_INVALID_CONTEXT) {
675 		XHCI_DMA_SYNC(xd->xd_octx, DDI_DMA_SYNC_FORKERNEL);
676 
677 		switch (XHCI_EPCTX_STATE(LE_32(epctx->xec_info))) {
678 		case XHCI_EP_STOPPED:
679 		case XHCI_EP_ERROR:
680 			/*
681 			 * This is where we wanted to go, so let's just take it.
682 			 */
683 			ret = USB_SUCCESS;
684 			break;
685 		case XHCI_EP_DISABLED:
686 		case XHCI_EP_RUNNING:
687 		case XHCI_EP_HALTED:
688 		default:
689 			/*
690 			 * If we're in any of these, something really weird has
691 			 * happened and it's not worth trying to recover at this
692 			 * point.
693 			 */
694 			xhci_error(xhcip, "!asked to stop endpoint %u on slot "
695 			    "%d and port %d: ended up in unexpected state %d",
696 			    xep->xep_num, xd->xd_slot, xd->xd_port,
697 			    XHCI_EPCTX_STATE(LE_32(epctx->xec_info)));
698 			return (ret);
699 		}
700 	}
701 
702 	/*
703 	 * Now that we're successful, we can clear any possible halted state
704 	 * tracking that we might have had.
705 	 */
706 	if (ret == USB_SUCCESS) {
707 		xep->xep_state &= ~XHCI_ENDPOINT_HALTED;
708 	}
709 
710 	return (ret);
711 }
712 
713 int
714 xhci_endpoint_ring(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
715 {
716 	/*
717	 * The doorbell IDs are offset by one from the endpoint numbers that we
718 	 * keep.
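	 * For example, the default control endpoint (xep_num 0) is rung with
	 * doorbell target 1, its Device Context Index.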
719 	 */
720 	xhci_put32(xhcip, XHCI_R_DOOR, XHCI_DOORBELL(xd->xd_slot),
721 	    xep->xep_num + 1);
722 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
723 		xhci_error(xhcip, "failed to ring doorbell for slot %d and "
724 		    "endpoint %u: encountered fatal FM register access error",
725 		    xd->xd_slot, xep->xep_num);
726 		xhci_fm_runtime_reset(xhcip);
727 		return (USB_HC_HARDWARE_ERROR);
728 	}
729 
730 	DTRACE_PROBE3(xhci__doorbell__ring, xhci_t *, xhcip, uint32_t,
731 	    XHCI_DOORBELL(xd->xd_slot), uint32_t, xep->xep_num + 1);
732 
733 	return (USB_SUCCESS);
734 }
735 
736 static void
737 xhci_endpoint_tick(void *arg)
738 {
739 	int ret;
740 	xhci_transfer_t *xt;
741 	xhci_endpoint_t *xep = arg;
742 	xhci_device_t *xd = xep->xep_xd;
743 	xhci_t *xhcip = xep->xep_xhci;
744 
745 	mutex_enter(&xhcip->xhci_lock);
746 
747 	/*
748	 * If we have the teardown flag set, then this endpoint is going away,
749	 * so don't try to do anything. Also, if a periodic endpoint somehow has
750	 * something scheduled, just quit now and don't bother.
751 	 */
752 	if (xep->xep_state & (XHCI_ENDPOINT_TEARDOWN |
753 	    XHCI_ENDPOINT_PERIODIC)) {
754 		xep->xep_timeout = 0;
755 		mutex_exit(&xhcip->xhci_lock);
756 		return;
757 	}
758 
759 	/*
760 	 * If something else has already kicked off, something potentially
761 	 * dangerous, just don't bother waiting for it and reschedule.
762 	 */
763 	if (xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) {
764 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
765 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
766 		mutex_exit(&xhcip->xhci_lock);
767 		return;
768 	}
769 
770 	/*
771 	 * At this point, we have an endpoint that we need to consider. See if
772 	 * there are any transfers on it, if none, we're done. If so, check if
773 	 * we have exceeded the timeout. If we have, then we have some work to
774 	 * do.
775 	 */
776 	xt = list_head(&xep->xep_transfers);
777 	if (xt == NULL) {
778 		xep->xep_timeout = 0;
779 		mutex_exit(&xhcip->xhci_lock);
780 		return;
781 	}
782 
783 	if (xt->xt_timeout > 0) {
784 		xt->xt_timeout--;
785 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
786 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
787 		mutex_exit(&xhcip->xhci_lock);
788 		return;
789 	}
790 
791 	/*
792 	 * This item has timed out. We need to stop the ring and take action.
793 	 */
794 	xep->xep_state |= XHCI_ENDPOINT_TIMED_OUT | XHCI_ENDPOINT_QUIESCE;
795 	ret = xhci_endpoint_quiesce(xhcip, xd, xep);
796 	if (ret != USB_SUCCESS) {
797 		/*
798 		 * If we fail to quiesce during the timeout, then remove the
799		 * state flags; hopefully we'll be able to quiesce it the next
800		 * time around, or if a reset or polling stop comes in, maybe
801		 * that can deal with it.
802 		 */
803 		xep->xep_state &= ~(XHCI_ENDPOINT_QUIESCE |
804 		    XHCI_ENDPOINT_TIMED_OUT);
805 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
806 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
807 		mutex_exit(&xhcip->xhci_lock);
808 		cv_broadcast(&xep->xep_state_cv);
809 		xhci_error(xhcip, "failed to successfully quiesce timed out "
810 		    "endpoint %u of device on slot %d and port %d: device "
811 		    "remains timed out", xep->xep_num, xd->xd_slot,
812 		    xd->xd_port);
813 		return;
814 	}
815 
816 	xhci_ring_skip_transfer(&xep->xep_ring, xt);
817 	(void) list_remove_head(&xep->xep_transfers);
818 	mutex_exit(&xhcip->xhci_lock);
819 
820 	/*
821	 * At this point, we try to set the ring's dequeue pointer. If this
822	 * fails, we're left in an awkward state. We've already adjusted the
823	 * ring and removed the transfer. All we can really do is return the
824	 * transfer and hope that a subsequent pipe reset is attempted, which
825	 * may succeed at that point. Based on everything we've done to set
826	 * things up, it'd be odd if this did fail.
827 	 */
828 	ret = xhci_command_set_tr_dequeue(xhcip, xd, xep);
829 	mutex_enter(&xhcip->xhci_lock);
830 	xep->xep_state &= ~XHCI_ENDPOINT_QUIESCE;
831 	if (ret == USB_SUCCESS) {
832 		xep->xep_state &= ~XHCI_ENDPOINT_TIMED_OUT;
833 	} else {
834 		xhci_error(xhcip, "failed to successfully set transfer ring "
835 		    "dequeue pointer of timed out endpoint %u of "
836 		    "device on slot %d and port %d: device remains timed out, "
837 		    "please use cfgadm to recover", xep->xep_num, xd->xd_slot,
838 		    xd->xd_port);
839 	}
840 	xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
841 	    drv_usectohz(XHCI_TICK_TIMEOUT_US));
842 	mutex_exit(&xhcip->xhci_lock);
843 	cv_broadcast(&xep->xep_state_cv);
844 
845 	/*
846 	 * Because we never time out periodic related activity, we will always
847 	 * have the request on the transfer.
848 	 */
849 	ASSERT(xt->xt_usba_req != NULL);
850 	usba_hcdi_cb(xep->xep_pipe, xt->xt_usba_req, USB_CR_TIMEOUT);
851 	xhci_transfer_free(xhcip, xt);
852 }
853 
854 /*
855  * We've been asked to schedule a transfer's worth of TRBs onto the specified
856  * endpoint. We need to make sure that there is enough room, at which point we
857  * can queue them and ring the doorbell. Note that we queue in reverse order so
858  * the hardware won't see a valid cycle bit until the whole transfer is queued.
859  */
860 int
861 xhci_endpoint_schedule(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep,
862     xhci_transfer_t *xt, boolean_t ring)
863 {
864 	int i;
865 	xhci_ring_t *rp = &xep->xep_ring;
866 
867 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
868 	ASSERT(xt->xt_ntrbs > 0);
869 	ASSERT(xt->xt_trbs != NULL);
870 
871 	if ((xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) != 0)
872 		return (USB_FAILURE);
873 
874 	if (xhci_ring_trb_space(rp, xt->xt_ntrbs) == B_FALSE)
875 		return (USB_NO_RESOURCES);
876 
877 	for (i = xt->xt_ntrbs - 1; i > 0; i--) {
878 		xhci_ring_trb_fill(rp, i, &xt->xt_trbs[i], B_TRUE);
879 	}
880 	xhci_ring_trb_fill(rp, 0U, &xt->xt_trbs[0], B_FALSE);
881 
882 	XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
883 	xhci_ring_trb_produce(rp, xt->xt_ntrbs);
884 	list_insert_tail(&xep->xep_transfers, xt);
885 
886 	XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
887 	if (xhci_check_dma_handle(xhcip, &rp->xr_dma) != DDI_FM_OK) {
888 		xhci_error(xhcip, "failed to write out TRB for device on slot "
889 		    "%d, port %d, and endpoint %u: encountered fatal FM error "
890 		    "synchronizing ring DMA memory", xd->xd_slot, xd->xd_port,
891 		    xep->xep_num);
892 		xhci_fm_runtime_reset(xhcip);
893 		return (USB_HC_HARDWARE_ERROR);
894 	}
895 
896 	if (xep->xep_timeout == 0 &&
897 	    !(xep->xep_state & XHCI_ENDPOINT_PERIODIC)) {
898 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
899 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
900 	}
901 
902 	xt->xt_sched_time = gethrtime();
903 
904 	if (ring == B_FALSE)
905 		return (USB_SUCCESS);
906 
907 	return (xhci_endpoint_ring(xhcip, xd, xep));
908 }
909 
910 static xhci_transfer_t *
911 xhci_endpoint_determine_transfer(xhci_t *xhcip, xhci_endpoint_t *xep,
912     xhci_trb_t *trb, int *offp)
913 {
914 	xhci_transfer_t *xt;
915 
916 	ASSERT(xhcip != NULL);
917 	ASSERT(offp != NULL);
918 	ASSERT(xep != NULL);
919 	ASSERT(trb != NULL);
920 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
921 
922 	if ((xt = list_head(&xep->xep_transfers)) == NULL)
923 		return (NULL);
924 
925 	*offp = xhci_ring_trb_valid_range(&xep->xep_ring, LE_64(trb->trb_addr),
926 	    xt->xt_ntrbs);
927 	if (*offp == -1)
928 		return (NULL);
929 	return (xt);
930 }
931 
932 static void
933 xhci_endpoint_reschedule_periodic(xhci_t *xhcip, xhci_device_t *xd,
934     xhci_endpoint_t *xep, xhci_transfer_t *xt)
935 {
936 	int ret;
937 	xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
938 	xhci_periodic_pipe_t *xpp = &xp->xp_periodic;
939 
940 	ASSERT3U(xpp->xpp_tsize, >, 0);
941 
942 	xt->xt_short = 0;
943 	xt->xt_cr = USB_CR_OK;
944 
945 	mutex_enter(&xhcip->xhci_lock);
946 
947 	/*
948 	 * If we don't have an active poll, then we shouldn't bother trying to
949 	 * reschedule it. This means that we're trying to stop or we ran out of
950 	 * memory.
951 	 */
952 	if (xpp->xpp_poll_state != XHCI_PERIODIC_POLL_ACTIVE) {
953 		mutex_exit(&xhcip->xhci_lock);
954 		return;
955 	}
956 
957 	if (xep->xep_type == USB_EP_ATTR_ISOCH) {
958 		int i;
959 		for (i = 0; i < xt->xt_ntrbs; i++) {
960 			xt->xt_isoc[i].isoc_pkt_actual_length =
961 			    xt->xt_isoc[i].isoc_pkt_length;
962 			xt->xt_isoc[i].isoc_pkt_status = USB_CR_OK;
963 		}
964 	}
965 
966 	/*
967 	 * In general, there should always be space on the ring for this. The
968 	 * only reason that rescheduling an existing transfer for a periodic
969 	 * endpoint wouldn't work is because we have a hardware error, at which
970	 * point we're going to be going down hard anyways. We log here to make
971	 * this case discoverable in case our assumptions turn out to be
972	 * wrong.
973 	 */
974 	ret = xhci_endpoint_schedule(xhcip, xd, xep, xt, B_TRUE);
975 	if (ret != 0) {
976 		xhci_log(xhcip, "!failed to reschedule periodic endpoint %u "
977 		    "(type %u) on slot %d: %d\n", xep->xep_num, xep->xep_type,
978 		    xd->xd_slot, ret);
979 	}
980 	mutex_exit(&xhcip->xhci_lock);
981 }
982 
983 /*
984  * We're dealing with a message on a control endpoint. This may be a default
985  * endpoint or otherwise. These usually come in groups of 3+ TRBs where you have
986  * a setup stage, a data stage (which may consist of one or more TRBs), and a
987  * final status stage.
988  *
989  * We generally set ourselves up such that we get interrupted and notified only
990  * on the status stage and for short transfers in the data stage. If we
991  * encounter a short transfer in the data stage, then we need to go through and
992  * check whether or not the short transfer is allowed. If it is, then there's
993  * nothing to do. We'll update everything and call back the framework once we
994  * get the status stage.
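 *
 * For example (illustrative): a control-IN GET_DESCRIPTOR request is queued as
 * a setup TRB, one or more data TRBs, and a status TRB; the status TRB is the
 * last entry of the transfer, which is how we tell the two cases apart below.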
995  */
996 static boolean_t
997 xhci_endpoint_control_callback(xhci_t *xhcip, xhci_device_t *xd,
998     xhci_endpoint_t *xep, xhci_transfer_t *xt, int off, xhci_trb_t *trb)
999 {
1000 	int code;
1001 	usb_ctrl_req_t *ucrp;
1002 	xhci_transfer_t *rem;
1003 
1004 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1005 
1006 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1007 	ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1008 
1009 	/*
1010	 * Now that we know what this TRB is for, determine whether it was for a
1011	 * data/normal stage or the status stage. We cheat by looking at the last
1012	 * entry. If it's a data stage, then we had a short transfer. In that
1013 	 * case, we should go through and check to make sure it's allowed. If
1014 	 * not, we need to fail the transfer, try to stop the ring, and make
1015 	 * callbacks. We'll clean up the xhci transfer at this time.
1016 	 */
1017 	if (off != xt->xt_ntrbs - 1) {
1018		uint_t remain;
1020 
1021 		/*
1022 		 * This is a data stage TRB. The only reason we should have
1023		 * gotten something for this is because it was short. Make sure
1024 		 * it's okay before we continue.
1025 		 */
1026 		VERIFY3S(code, ==, XHCI_CODE_SHORT_XFER);
1027 		if (!(ucrp->ctrl_attributes & USB_ATTRS_SHORT_XFER_OK)) {
1028 			xt->xt_cr = USB_CR_DATA_UNDERRUN;
1029 			mutex_exit(&xhcip->xhci_lock);
1030 			return (B_TRUE);
1031 		}
1032 
1033 		/*
1034 		 * The value in the resulting trb is how much data remained to
1035 		 * be transferred. Normalize that against the original buffer
1036 		 * size.
1037 		 */
1038 		remain = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1039 		xt->xt_short = xt->xt_buffer.xdb_len - remain;
1040 		mutex_exit(&xhcip->xhci_lock);
1041 		return (B_TRUE);
1042 	}
1043 
1044 	/*
1045 	 * Okay, this is a status stage trb that's in good health. We should
1046	 * go ahead, sync the data, and finally do the callback. If
1047 	 * we have short data, then xt->xt_short will be non-zero.
1048 	 */
1049 	if (xt->xt_data_tohost == B_TRUE) {
1050 		size_t len;
1051 		if (xt->xt_short != 0) {
1052 			len = xt->xt_short;
1053 		} else {
1054 			len = xt->xt_buffer.xdb_len;
1055 		}
1056 
1057 		if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
1058 		    DDI_FM_OK) {
1059 			xhci_error(xhcip, "failed to process control transfer "
1060 			    "callback for endpoint %u of device on slot %d and "
1061 			    "port %d: encountered fatal FM error synchronizing "
1062 			    "DMA memory, resetting device", xep->xep_num,
1063 			    xd->xd_slot, xd->xd_port);
1064 			xhci_fm_runtime_reset(xhcip);
1065 			mutex_exit(&xhcip->xhci_lock);
1066 			return (B_FALSE);
1067 		}
1068 
1069 		xhci_transfer_copy(xt, ucrp->ctrl_data->b_rptr, len, B_TRUE);
1070 		ucrp->ctrl_data->b_wptr += len;
1071 	}
1072 
1073 	/*
1074 	 * Now we're done. We can go ahead and bump the ring. Free the transfer
1075 	 * outside of the lock and call back into the framework.
1076 	 */
1077 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
1078 	rem = list_remove_head(&xep->xep_transfers);
1079 	VERIFY3P(rem, ==, xt);
1080 	mutex_exit(&xhcip->xhci_lock);
1081 
1082 	usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)ucrp, xt->xt_cr);
1083 	xhci_transfer_free(xhcip, xt);
1084 
1085 	return (B_TRUE);
1086 }
1087 
1088 /*
1089  * Cons up a new usb request for the periodic data transfer if we can. If there
1090  * isn't one available, change the return code to NO_RESOURCES and stop polling
1091  * on this endpoint, thus using and consuming the original request.
1092  */
1093 static usb_opaque_t
1094 xhci_endpoint_dup_periodic(xhci_endpoint_t *xep, xhci_transfer_t *xt,
1095     usb_cr_t *cr)
1096 {
1097 	usb_opaque_t urp;
1098 
1099 	xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
1100 	xhci_periodic_pipe_t *xpp = &xp->xp_periodic;
1101 
1102 	/*
1103 	 * In general, transfers shouldn't have a usb request. However, oneshot
1104 	 * Interrupt IN ones will, so we use this as a way to shortcut out of
1105 	 * here.
1106 	 */
1107 	if (xt->xt_usba_req != NULL)
1108 		return (xt->xt_usba_req);
1109 
1110 	if (xep->xep_type == USB_EP_ATTR_INTR) {
1111 		urp = (usb_opaque_t)usba_hcdi_dup_intr_req(xep->xep_pipe->p_dip,
1112 		    (usb_intr_req_t *)xpp->xpp_usb_req, xpp->xpp_tsize, 0);
1113 	} else {
1114 		urp = (usb_opaque_t)usba_hcdi_dup_isoc_req(xep->xep_pipe->p_dip,
1115 		    (usb_isoc_req_t *)xpp->xpp_usb_req, 0);
1116 	}
1117 	if (urp == NULL) {
1118 		xpp->xpp_poll_state = XHCI_PERIODIC_POLL_NOMEM;
1119 		urp = xpp->xpp_usb_req;
1120 		xpp->xpp_usb_req = NULL;
1121 		*cr = USB_CR_NO_RESOURCES;
1122 	} else {
1123 		mutex_enter(&xep->xep_pipe->p_mutex);
1124 		xep->xep_pipe->p_req_count++;
1125 		mutex_exit(&xep->xep_pipe->p_mutex);
1126 	}
1127 
1128 	return (urp);
1129 }
1130 
1131 static xhci_device_t *
1132 xhci_device_lookup_by_slot(xhci_t *xhcip, int slot)
1133 {
1134 	xhci_device_t *xd;
1135 
1136 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1137 
1138 	for (xd = list_head(&xhcip->xhci_usba.xa_devices); xd != NULL;
1139 	    xd = list_next(&xhcip->xhci_usba.xa_devices, xd)) {
1140 		if (xd->xd_slot == slot)
1141 			return (xd);
1142 	}
1143 
1144 	return (NULL);
1145 }
1146 
1147 /*
1148  * Handle things which consist solely of normal transfers, in other words, bulk
1149  * and interrupt transfers.
1150  */
1151 static boolean_t
1152 xhci_endpoint_norm_callback(xhci_t *xhcip, xhci_device_t *xd,
1153     xhci_endpoint_t *xep, xhci_transfer_t *xt, int off, xhci_trb_t *trb)
1154 {
1155 	int code;
1156	usb_cr_t cr = USB_CR_OK;
1157 	xhci_transfer_t *rem;
1158 	int attrs;
1159 	mblk_t *mp;
1160 	boolean_t periodic = B_FALSE;
1161 	usb_opaque_t urp;
1162 
1163 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1164 	ASSERT(xep->xep_type == USB_EP_ATTR_BULK ||
1165 	    xep->xep_type == USB_EP_ATTR_INTR);
1166 
1167 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1168 
1169 	if (code == XHCI_CODE_SHORT_XFER) {
1170 		int residue;
1171 		residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1172 		xt->xt_short = xt->xt_buffer.xdb_len - residue;
1173 	}
1174 
1175 	/*
1176 	 * If we have an interrupt from something that's not the last entry,
1177 	 * that must mean we had a short transfer, so there's nothing more for
1178 	 * us to do at the moment. We won't call back until everything's
1179 	 * finished for the general transfer.
1180 	 */
1181 	if (off < xt->xt_ntrbs - 1) {
1182 		mutex_exit(&xhcip->xhci_lock);
1183 		return (B_TRUE);
1184 	}
1185 
1186 	urp = xt->xt_usba_req;
1187 	if (xep->xep_type == USB_EP_ATTR_BULK) {
1188 		usb_bulk_req_t *ubrp = (usb_bulk_req_t *)xt->xt_usba_req;
1189 		attrs = ubrp->bulk_attributes;
1190 		mp = ubrp->bulk_data;
1191 	} else {
1192 		usb_intr_req_t *uirp = (usb_intr_req_t *)xt->xt_usba_req;
1193 
1194 		if (uirp == NULL) {
1195 			periodic = B_TRUE;
1196 			urp = xhci_endpoint_dup_periodic(xep, xt, &cr);
1197 			uirp = (usb_intr_req_t *)urp;
1198 
1199 			/*
1200 			 * If we weren't able to duplicate the interrupt, then
1201 			 * we can't put any data in it.
1202 			 */
1203 			if (cr == USB_CR_NO_RESOURCES)
1204 				goto out;
1205 		}
1206 
1207 		attrs = uirp->intr_attributes;
1208 		mp = uirp->intr_data;
1209 	}
1210 
1211 	if (xt->xt_data_tohost == B_TRUE) {
1212 		size_t len;
1213 		if (xt->xt_short != 0) {
1214 			if (!(attrs & USB_ATTRS_SHORT_XFER_OK)) {
1215 				cr = USB_CR_DATA_UNDERRUN;
1216 				goto out;
1217 			}
1218 			len = xt->xt_short;
1219 		} else {
1220 			len = xt->xt_buffer.xdb_len;
1221 		}
1222 
1223 		if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
1224 		    DDI_FM_OK) {
1225 			xhci_error(xhcip, "failed to process normal transfer "
1226 			    "callback for endpoint %u of device on slot %d and "
1227 			    "port %d: encountered fatal FM error synchronizing "
1228 			    "DMA memory, resetting device", xep->xep_num,
1229 			    xd->xd_slot, xd->xd_port);
1230 			xhci_fm_runtime_reset(xhcip);
1231 			mutex_exit(&xhcip->xhci_lock);
1232 			return (B_FALSE);
1233 		}
1234 
1235 		xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
1236 		mp->b_wptr += len;
1237 	}
1238 	cr = USB_CR_OK;
1239 
1240 out:
1241 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
1242 	rem = list_remove_head(&xep->xep_transfers);
1243 	VERIFY3P(rem, ==, xt);
1244 	mutex_exit(&xhcip->xhci_lock);
1245 
1246 	usba_hcdi_cb(xep->xep_pipe, urp, cr);
1247 	if (periodic == B_TRUE) {
1248 		xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1249 	} else {
1250 		xhci_transfer_free(xhcip, xt);
1251 	}
1252 
1253 	return (B_TRUE);
1254 }
1255 
1256 static boolean_t
1257 xhci_endpoint_isoch_callback(xhci_t *xhcip, xhci_device_t *xd,
1258     xhci_endpoint_t *xep, xhci_transfer_t *xt, int off, xhci_trb_t *trb)
1259 {
1260 	int code;
1261 	usb_cr_t cr;
1262 	xhci_transfer_t *rem;
1263 	usb_isoc_pkt_descr_t *desc;
1264 	usb_isoc_req_t *usrp;
1265 
1266 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1267 	ASSERT3S(xep->xep_type, ==, USB_EP_ATTR_ISOCH);
1268 
1269 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1270 
1271 	/*
1272 	 * The descriptors that we copy the data from are set up to assume that
1273 	 * everything was OK and we transferred all the requested data.
1274 	 */
1275 	desc = &xt->xt_isoc[off];
1276 	if (code == XHCI_CODE_SHORT_XFER) {
1277 		int residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1278 		desc->isoc_pkt_actual_length -= residue;
1279 	}
1280 
1281 	/*
1282 	 * We don't perform the callback until the very last TRB is returned
1283 	 * here. If we have a TRB report on something else, that means that we
1284 	 * had a short transfer.
1285 	 */
1286 	if (off < xt->xt_ntrbs - 1) {
1287 		mutex_exit(&xhcip->xhci_lock);
1288 		return (B_TRUE);
1289 	}
1290 
1291 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
1292 	rem = list_remove_head(&xep->xep_transfers);
1293 	VERIFY3P(rem, ==, xt);
1294 	mutex_exit(&xhcip->xhci_lock);
1295 
1296 	cr = USB_CR_OK;
1297 
1298 	if (xt->xt_data_tohost == B_TRUE) {
1299 		usb_opaque_t urp;
1300 		urp = xhci_endpoint_dup_periodic(xep, xt, &cr);
1301 		usrp = (usb_isoc_req_t *)urp;
1302 
1303 		if (cr == USB_CR_OK) {
1304 			mblk_t *mp;
1305 			size_t len;
1306 			if (xhci_transfer_sync(xhcip, xt,
1307 			    DDI_DMA_SYNC_FORCPU) != DDI_FM_OK) {
1308 				xhci_error(xhcip, "failed to process "
1309 				    "isochronous transfer callback for "
1310 				    "endpoint %u of device on slot %d and port "
1311 				    "%d: encountered fatal FM error "
1312 				    "synchronizing DMA memory, resetting "
1313 				    "device",
1314 				    xep->xep_num, xd->xd_slot, xd->xd_port);
1315 				xhci_fm_runtime_reset(xhcip);
1316 				mutex_exit(&xhcip->xhci_lock);
1317 				return (B_FALSE);
1318 			}
1319 
1320 			mp = usrp->isoc_data;
1321 			len = xt->xt_buffer.xdb_len;
1322 			xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
1323 			mp->b_wptr += len;
1324 		}
1325 	} else {
1326 		usrp = (usb_isoc_req_t *)xt->xt_usba_req;
1327 	}
1328 
1329 	if (cr == USB_CR_OK) {
1330 		bcopy(xt->xt_isoc, usrp->isoc_pkt_descr,
1331 		    sizeof (usb_isoc_pkt_descr_t) * usrp->isoc_pkts_count);
1332 	}
1333 
1334 	usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)usrp, cr);
1335 	if (xt->xt_data_tohost == B_TRUE) {
1336 		xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1337 	} else {
1338 		xhci_transfer_free(xhcip, xt);
1339 	}
1340 
1341 	return (B_TRUE);
1342 }
1343 
1344 boolean_t
1345 xhci_endpoint_transfer_callback(xhci_t *xhcip, xhci_trb_t *trb)
1346 {
1347 	boolean_t ret;
1348 	int slot, endpoint, code, off;
1349 	xhci_device_t *xd;
1350 	xhci_endpoint_t *xep;
1351 	xhci_transfer_t *xt;
1352 	boolean_t transfer_done;
1353 
1354 	endpoint = XHCI_TRB_GET_EP(LE_32(trb->trb_flags));
1355 	slot = XHCI_TRB_GET_SLOT(LE_32(trb->trb_flags));
1356 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1357 
1358 	mutex_enter(&xhcip->xhci_lock);
1359 	xd = xhci_device_lookup_by_slot(xhcip, slot);
1360 	if (xd == NULL) {
1361 		xhci_error(xhcip, "received transfer trb with code %d for "
1362 		    "unknown slot %d and endpoint %d: resetting device", code,
1363 		    slot, endpoint);
1364 		mutex_exit(&xhcip->xhci_lock);
1365 		xhci_fm_runtime_reset(xhcip);
1366 		return (B_FALSE);
1367 	}
1368 
1369 	/*
1370 	 * Endpoint IDs are indexed based on their Device Context Index, which
1371 	 * means that we need to subtract one to get the actual ID that we use.
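	 * For example, a TRB for DCI 1 (the default control endpoint) maps to
	 * our endpoint ID 0.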
1372 	 */
1373 	xep = xd->xd_endpoints[endpoint - 1];
1374 	if (xep == NULL) {
1375 		xhci_error(xhcip, "received transfer trb with code %d, slot "
1376 		    "%d, and unknown endpoint %d: resetting device", code,
1377 		    slot, endpoint);
1378 		mutex_exit(&xhcip->xhci_lock);
1379 		xhci_fm_runtime_reset(xhcip);
1380 		return (B_FALSE);
1381 	}
1382 
1383 	/*
1384 	 * This TRB should be part of a transfer. If it's not, then we ignore
1385 	 * it. We also check whether or not it's for the first transfer. Because
1386 	 * the rings are serviced in order, it should be.
1387 	 */
1388 	if ((xt = xhci_endpoint_determine_transfer(xhcip, xep, trb, &off)) ==
1389 	    NULL) {
1390 		mutex_exit(&xhcip->xhci_lock);
1391 		return (B_TRUE);
1392 	}
1393 
1394 	transfer_done = B_FALSE;
1395 
1396 	switch (code) {
1397 	case XHCI_CODE_SUCCESS:
1398 	case XHCI_CODE_SHORT_XFER:
1399 		/* Handled by endpoint logic */
1400 		break;
1401 	case XHCI_CODE_XFER_STOPPED:
1402 	case XHCI_CODE_XFER_STOPINV:
1403 	case XHCI_CODE_XFER_STOPSHORT:
1404 		/*
1405 		 * This causes us to transition the endpoint to a stopped state.
1406 		 * Each of these indicate a different possible state that we
1407 		 * have to deal with. Effectively we're going to drop it and
1408 		 * leave it up to the consumers to figure out what to do. For
1409 		 * the moment, that's generally okay because stops are only used
1410 		 * in cases where we're cleaning up outstanding reqs, etc.
1411 		 */
1412 		mutex_exit(&xhcip->xhci_lock);
1413 		return (B_TRUE);
1414 	case XHCI_CODE_STALL:
1415 		/*
1416 		 * This causes us to transition to the halted state;
1417 		 * however, downstream clients are able to handle this just
1418 		 * fine.
1419 		 */
1420 		xep->xep_state |= XHCI_ENDPOINT_HALTED;
1421 		xt->xt_cr = USB_CR_STALL;
1422 		transfer_done = B_TRUE;
1423 		break;
1424 	case XHCI_CODE_BABBLE:
1425 		transfer_done = B_TRUE;
1426 		xt->xt_cr = USB_CR_DATA_OVERRUN;
1427 		xep->xep_state |= XHCI_ENDPOINT_HALTED;
1428 		break;
1429 	case XHCI_CODE_TXERR:
1430 	case XHCI_CODE_SPLITERR:
1431 		transfer_done = B_TRUE;
1432 		xt->xt_cr = USB_CR_DEV_NOT_RESP;
1433 		xep->xep_state |= XHCI_ENDPOINT_HALTED;
1434 		break;
1435 	default:
1436 		/*
1437 		 * Treat these as general unspecified errors that don't cause a
1438 		 * stop of the ring. Even if it does, a subsequent timeout
1439		 * should occur which causes us to end up driving a pipe reset
1440 		 * or at least issuing a reset of the device as part of
1441 		 * quiescing.
1442 		 */
1443 		transfer_done = B_TRUE;
1444 		break;
1445 	}
1446 
1447 	if (transfer_done == B_TRUE) {
1448 		xhci_transfer_t *alt;
1449 
1450 		alt = list_remove_head(&xep->xep_transfers);
1451 		VERIFY3P(alt, ==, xt);
1452 		mutex_exit(&xhcip->xhci_lock);
1453 		if (xt->xt_usba_req == NULL) {
1454 			usb_opaque_t urp;
1455 
1456 			urp = xhci_endpoint_dup_periodic(xep, xt, &xt->xt_cr);
1457 			usba_hcdi_cb(xep->xep_pipe, urp, xt->xt_cr);
1458 		} else {
1459 			usba_hcdi_cb(xep->xep_pipe,
1460 			    (usb_opaque_t)xt->xt_usba_req, xt->xt_cr);
1461 			xhci_transfer_free(xhcip, xt);
1462 		}
1463 		return (B_TRUE);
1464 	}
1465 
1466 	/*
1467 	 * Process the transfer callback based on the type of endpoint. Each of
1468 	 * these callback functions will end up calling back into USBA via
1469 	 * usba_hcdi_cb() to return transfer information (whether successful or
1470 	 * not). Because we can't hold any locks across a call to that function,
1471 	 * all of these callbacks will drop the xhci_t`xhci_lock by the time
1472 	 * they return. This is why there's no mutex_exit() call before we
1473 	 * return.
1474 	 */
1475 	switch (xep->xep_type) {
1476 	case USB_EP_ATTR_CONTROL:
1477 		ret = xhci_endpoint_control_callback(xhcip, xd, xep, xt, off,
1478 		    trb);
1479 		break;
1480 	case USB_EP_ATTR_BULK:
1481 		ret = xhci_endpoint_norm_callback(xhcip, xd, xep, xt, off, trb);
1482 		break;
1483 	case USB_EP_ATTR_INTR:
1484 		ret = xhci_endpoint_norm_callback(xhcip, xd, xep, xt, off,
1485 		    trb);
1486 		break;
1487 	case USB_EP_ATTR_ISOCH:
1488 		ret = xhci_endpoint_isoch_callback(xhcip, xd, xep, xt, off,
1489 		    trb);
1490 		break;
1491 	default:
1492 		panic("bad endpoint type: %u", xep->xep_type);
1493 	}
1494 
1495 	return (ret);
1496 }
1497