// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					       unsigned int cycle_state,
					       unsigned int max_packet,
					       gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc(max_packet, flags);
		if (!seg->bounce_buf) {
			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
			kfree(seg);
			return NULL;
		}
	}
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg->bounce_buf);
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
					struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
			       struct xhci_segment *next,
			       enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(type == TYPE_ISOC &&
				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
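/*
 * Illustrative sketch, not driver code: how a consumer steps from one
 * segment to the next through the Link TRB set up above. The driver's real
 * enqueue/dequeue advance logic lives in inc_enq()/inc_deq() in xhci-ring.c
 * and also handles cycle-bit toggling; this sketch only models the pointer
 * chase, assuming the private 'next' pointers mirror the Link TRBs.
 */
static inline union xhci_trb *xhci_example_advance(struct xhci_segment **seg,
						   union xhci_trb *trb)
{
	/* The last TRB of a non-event segment is the Link TRB placed above */
	if (trb == &(*seg)->trbs[TRBS_PER_SEGMENT - 1]) {
		*seg = (*seg)->next;
		return (*seg)->trbs;	/* first TRB of the next segment */
	}
	return trb + 1;
}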
/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
			    struct xhci_segment *first,
			    struct xhci_segment *last, unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}

/*
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to. We need to do this because the host controller won't
 * tell us which stream ring the TRB came from. We could store the stream ID
 * in an event data TRB, but that doesn't help us for the cancellation case,
 * since the endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses. For example,
 * say I have segments of size 1KB, that are always 1KB aligned. A segment
 * may start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10
 * bits, the key to the stream ID is 0x43244. I can use the DMA address of
 * the TRB to pass the radix tree a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as
 * the key on 32-bit or 64-bit systems (it would also be fine if we asked
 * for 64-bit PCI DMA addresses on a 64-bit system). There might be a problem
 * on 32-bit extended systems (where the DMA address can be bigger than
 * 32-bits), if we allow the PCI dma mask to be bigger than 32-bits. So
 * don't do that.
 */
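/*
 * Worked example with the driver's actual constants (illustrative numbers):
 * with TRBS_PER_SEGMENT == 256 a segment holds 256 * 16 = 4096 bytes, so
 * TRB_SEGMENT_SHIFT is 12 and every TRB in a 4KB-aligned segment at
 * 0x10c91000 hashes to the same key:
 *
 *	0x10c91000 >> 12 = 0x10c91	(first TRB of the segment)
 *	0x10c91ff0 >> 12 = 0x10c91	(last TRB, same key)
 *	0x10c92000 >> 12 = 0x10c92	(next segment, different key)
 *
 * radix_tree_lookup() on that key returns the struct xhci_ring that owns
 * the segment, which is how xhci_dma_to_transfer_ring() further down
 * recovers the stream ring from an event's TRB pointer.
 */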
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
				       struct xhci_ring *ring,
				       struct xhci_segment *seg,
				       gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;
	ret = radix_tree_insert(trb_address_map, key, ring);
	radix_tree_preload_end();
	return ret;
}

static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
					struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
				ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	trace_xhci_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring,
				      unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1. The
	 * consumer must compare CCS to the cycle bit to check ownership,
	 * so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purposes
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
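/*
 * Worked example for the accounting above (illustrative): a freshly
 * allocated transfer ring with num_segs == 2 has 2 * TRBS_PER_SEGMENT TRBs
 * in memory. One TRB per segment is the Link TRB, and one more is held
 * back so the enqueue pointer can never catch up with the dequeue pointer,
 * so with TRBS_PER_SEGMENT == 256 the ring starts with
 *
 *	2 * (256 - 1) - 1 = 509
 *
 * TRBs free for the producer to use.
 */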
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type,
			max_packet, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	trace_xhci_ring_alloc(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
			     struct xhci_virt_device *virt_dev,
			     unsigned int ep_index)
{
	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
	virt_dev->eps[ep_index].ring = NULL;
}

/*
 * Expand an existing ring.
 * Allocate a new set of segments and link them into the existing ring.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
			unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
			(TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type,
			ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
						ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;

		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	trace_xhci_ring_expansion(ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
			ring->num_segs);

	return 0;
}
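/*
 * Worked example for the expansion math above (illustrative): each new
 * segment contributes TRBS_PER_SEGMENT - 1 usable TRBs, since the last
 * TRB becomes a Link TRB. With TRBS_PER_SEGMENT == 256, a request for
 * num_trbs == 300 gives
 *
 *	num_segs_needed = (300 + 255 - 1) / 255 = 2
 *
 * (a ceiling division), and because we grow by max(ring->num_segs,
 * num_segs_needed), a 4-segment ring asked for 300 more TRBs is doubled
 * to 8 segments rather than getting just the 2 it strictly needs.
 */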
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
		struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
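/*
 * Illustrative layout, assuming 32-byte contexts (CSZ == 0); all offsets
 * double with HCC_64BYTE_CONTEXT. A device (output) context starts with
 * the slot context, while an input context prepends the input control
 * context, which is why the helpers above shift by one extra CTX_SIZE for
 * input contexts:
 *
 *	input ctx:  0x00 input control | 0x20 slot | 0x40 ep0 | 0x60 ep1 OUT ...
 *	device ctx: 0x00 slot          | 0x20 ep0  | 0x40 ep1 OUT ...
 *
 * ep_index is the driver's 0-based index (EP0 == 0, EP1 OUT == 1,
 * EP1 IN == 2, ...), hence the ep_index++ in xhci_get_ep_ctx().
 */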

/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size, stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size, dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}
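/*
 * Usage sketch (illustrative, not driver code): the two lookup helpers
 * above are two directions of the same bookkeeping. On a transfer event
 * the driver only has the TRB's DMA address and recovers the ring through
 * the radix tree via xhci_dma_to_transfer_ring(); when queueing, it has
 * the stream ID from the URB instead:
 */
static inline struct xhci_ring *xhci_example_pick_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index, struct urb *urb)
{
	/* stream_id 0 means "no streams"; the helper falls back to ep->ring */
	return xhci_stream_id_to_ring(dev, ep_index, urb->stream_id);
}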
/*
 * Change an endpoint's internal structure so it supports stream IDs. The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger
 * than the number of streams the driver wants to use. This is because the
 * number of stream context array entries must be a power of two.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *) * num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx) * num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
					mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero. This will cause the xHC to give us an
	 * error if the device asks for a stream ID we haven't set up (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
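/*
 * Worked example for the MaxPStreams math above (illustrative): the field
 * encodes an array of 2^(MaxPStreams + 1) entries. For a power-of-two
 * num_stream_ctxs == 256, fls(256) == 9, so MaxPStreams = 9 - 2 = 7 and
 * the controller decodes 2^(7 + 1) == 256 entries. Likewise
 * num_stream_ctxs == 4 gives fls(4) - 2 == 1, decoded as 2^2 == 4.
 */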
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
					struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

/*
 * Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
			   struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}


/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
				     struct xhci_virt_ep *ep)
{
	timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
		    0);
	ep->xhci = xhci;
}
static void xhci_free_tt_info(struct xhci_hcd *xhci,
			      struct xhci_virt_device *virt_dev,
			      int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}


/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];

	trace_xhci_free_virt_device(dev);

	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; i++) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	if (dev->udev && dev->udev->slot_id)
		dev->udev->slot_id = 0;
	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
/*
 * Free a virt_device structure.
 * If the virt_device added a tt_info (a hub) and has children pointing to
 * that tt_info, then free the child first. Recursive.
 * We can't rely on udev at this point to find child-parent relationships.
 */
void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	int i;

	vdev = xhci->devs[slot_id];
	if (!vdev)
		return;

	if (vdev->real_port == 0 ||
			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad vdev->real_port.\n");
		goto out;
	}

	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* is this a hub device that added a tt_info to the tts list? */
		if (tt_info->slot_id == slot_id) {
			/* are any devices using this tt_info? */
			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
				vdev = xhci->devs[i];
				if (vdev && (vdev->tt_info == tt_info))
					xhci_free_virt_devices_depth_first(
							xhci, i);
			}
		}
	}
out:
	/* we are now at a leaf device */
	xhci_debugfs_remove_slot(xhci, slot_id);
	xhci_free_virt_device(xhci, slot_id);
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
			   struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	dev = kzalloc(sizeof(*dev), flags);
	if (!dev)
		return 0;

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
	if (!dev->eps[0].ring)
		goto fail;

	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
		 slot_id,
		 &xhci->dcbaa->dev_context_ptrs[slot_id],
		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	trace_xhci_alloc_virt_device(dev);

	xhci->devs[slot_id] = dev;

	return 1;
fail:
	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);
	kfree(dev);

	return 0;
}
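/*
 * Illustrative view of the result above (not driver code): after a
 * successful allocation the Device Context Base Address Array entry for
 * this slot points at the output context, which the controller owns from
 * then on:
 *
 *	dcbaa->dev_context_ptrs[0]       -> scratchpad array (if any)
 *	dcbaa->dev_context_ptrs[slot_id] -> dev->out_ctx->dma
 *
 * The input context is never referenced by the DCBAA; it is only handed
 * to the hardware transiently through Address Device and Configure
 * Endpoint commands.
 */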
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
					  struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer. This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the
 * port status registers. xhci->port_array provides an array of the port
 * speed for each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB
 * device is attached to (or the roothub port its ancestor hub is attached
 * to). All we know is the index of that port under either the USB 2.0 or
 * the USB 3.0 roothub, but that doesn't give us the real index into the HW
 * port status registers. Call xhci_find_raw_port_number() to get real index.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
				      struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed >= USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}
/* Set up an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num;
	u32 max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER_PLUS:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port
	 * (or a descendant of one), it counts as a primary bandwidth domain,
	 * not a secondary bandwidth domain under a TT. An xhci_tt_info
	 * structure will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	trace_xhci_setup_addressable_virt_device(dev);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
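/*
 * Worked example (illustrative): a SuperSpeed interrupt endpoint with
 * bInterval == 4 yields interval = 4 - 1 = 3, i.e. a service period of
 * 2^3 * 125us = 1ms. A full speed isoc endpoint with the same bInterval
 * also gets the frame-to-microframe adjustment:
 * 2^(3 + 3) * 125us = 8ms, matching 2^(4-1) = 8 frames.
 */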
/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_dbg(&udev->dev,
			"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			ep->desc.bEndpointAddress,
			1 << interval,
			desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}


static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes". If xHCI's Interval
 * field is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if
 * interval is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */
		/* fall through */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {

			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return interval;
}
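/*
 * Worked example for the rounding above (illustrative): a high speed bulk
 * endpoint with bInterval == 9 (a NAK rate of 9 microframes) is not a
 * power of two, so xhci_microframes_to_exponent() computes
 * fls(9) - 1 == 3 and the endpoint is serviced every 2^3 == 8 microframes,
 * rounding the interval down and logging the adjustment.
 */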
/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc
 * eps. High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed < USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	/* Super speed and Plus have max burst in ep companion desc */
	if (udev->speed >= USB_SPEED_SUPER)
		return ep->ss_ep_comp.bMaxBurst;

	if (udev->speed == USB_SPEED_HIGH &&
	    (usb_endpoint_xfer_isoc(&ep->desc) ||
	     usb_endpoint_xfer_int(&ep->desc)))
		return usb_endpoint_maxp_mult(&ep->desc) - 1;

	return 0;
}

static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
	int in;

	in = usb_endpoint_dir_in(&ep->desc);

	switch (usb_endpoint_type(&ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return CTRL_EP;
	case USB_ENDPOINT_XFER_BULK:
		return in ? BULK_IN_EP : BULK_OUT_EP;
	case USB_ENDPOINT_XFER_ISOC:
		return in ? ISOC_IN_EP : ISOC_OUT_EP;
	case USB_ENDPOINT_XFER_INT:
		return in ? INT_IN_EP : INT_OUT_EP;
	}
	return 0;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
	else if (udev->speed >= USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = usb_endpoint_maxp_mult(&ep->desc);
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * max_burst;
}
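/*
 * Worked example (illustrative): a high speed isoc endpoint with
 * wMaxPacketSize == 1024 and two additional transaction opportunities per
 * microframe has usb_endpoint_maxp() == 1024 and
 * usb_endpoint_maxp_mult() == 3, so the max ESIT payload is
 * 1024 * 3 == 3072 bytes per service interval.
 */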
/* Set up an endpoint with one ring segment. Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		       struct xhci_virt_device *virt_dev,
		       struct usb_device *udev,
		       struct usb_host_endpoint *ep,
		       gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	enum xhci_ring_type ring_type;
	u32 max_esit_payload;
	u32 endpoint_type;
	unsigned int max_burst;
	unsigned int interval;
	unsigned int mult;
	unsigned int avg_trb_len;
	unsigned int err_count = 0;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(ep);
	if (!endpoint_type)
		return -EINVAL;

	ring_type = usb_endpoint_type(&ep->desc);

	/*
	 * Get values to fill the endpoint context, mostly from ep descriptor.
	 * The average TRB buffer length for bulk endpoints is unclear as we
	 * have no clue on scatter gather list entry size. For Isoc and Int,
	 * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
	 */
	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
	interval = xhci_get_endpoint_interval(udev, ep);

	/* Periodic endpoint bInterval limit quirk */
	if (usb_endpoint_xfer_int(&ep->desc) ||
	    usb_endpoint_xfer_isoc(&ep->desc)) {
		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
		    udev->speed >= USB_SPEED_HIGH &&
		    interval >= 7) {
			interval = 6;
		}
	}

	mult = xhci_get_endpoint_mult(udev, ep);
	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = xhci_get_endpoint_max_burst(udev, ep);
	avg_trb_len = max_esit_payload;

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc, set CErr = 3 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		err_count = 3;
	/* Some devices get this wrong */
	if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
		max_packet = 512;
	/* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
		avg_trb_len = 8;
	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
		mult = 0;

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring)
		return -ENOMEM;

	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;

	/* Fill the endpoint context */
	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
				      EP_INTERVAL(interval) |
				      EP_MULT(mult));
	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
				       MAX_PACKET(max_packet) |
				       MAX_BURST(max_burst) |
				       ERROR_COUNT(err_count));
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
				  ep_ring->cycle_state);

	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
				      EP_AVG_TRB_LENGTH(avg_trb_len));

	return 0;
}
void xhci_endpoint_zero(struct xhci_hcd *xhci,
			struct xhci_virt_device *virt_dev,
			struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or
	 * configuration request succeeds.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; i++) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command. Only the context entries field
 * matters, but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		    struct xhci_container_ctx *in_ctx,
		    struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
			num_sp * sizeof(u64),
			&xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
						flags);
		if (!buf)
			goto fail_sp4;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
	}

	return 0;

fail_sp4:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_array[i]);
	}

	kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);

fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(dev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_array[i]);
	}
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(dev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}
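/*
 * Illustrative layout (not driver code): the scratchpad is the one piece
 * of memory the controller asks the driver to donate for its own use.
 * With num_sp == 2 and a 4K page size:
 *
 *	dcbaa->dev_context_ptrs[0] -> sp_array (2 u64 entries)
 *	sp_array[0] -> one zeroed 4K page
 *	sp_array[1] -> one zeroed 4K page
 *
 * The driver never touches the pages after allocation; only the hardware
 * uses them.
 */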
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
					bool allocate_completion,
					gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
	if (!command)
		return NULL;

	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
						   mem_flags);
	if (!command->in_ctx) {
		kfree(command->completion);
		kfree(command);
		return NULL;
	}
	return command;
}

void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
	kfree(urb_priv);
}

void xhci_free_command(struct xhci_hcd *xhci,
		       struct xhci_command *command)
{
	xhci_free_container_ctx(xhci, command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

int xhci_alloc_erst(struct xhci_hcd *xhci,
		    struct xhci_ring *evt_ring,
		    struct xhci_erst *erst,
		    gfp_t flags)
{
	size_t size;
	unsigned int val;
	struct xhci_segment *seg;
	struct xhci_erst_entry *entry;

	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
	erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
					    size, &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = evt_ring->num_segs;

	seg = evt_ring->first_seg;
	for (val = 0; val < evt_ring->num_segs; val++) {
		entry = &erst->entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	return 0;
}

void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
	size_t size;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
	if (erst->entries)
		dma_free_coherent(dev, size,
				erst->entries,
				erst->erst_dma_addr);
	erst->entries = NULL;
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int i, j, num_ports;

	cancel_delayed_work_sync(&xhci->cmd_timer);

	xhci_free_erst(xhci, &xhci->erst);

	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->lpm_command = NULL;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	xhci_cleanup_command_queue(xhci);

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;

		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;

			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
		xhci_free_virt_devices_depth_first(xhci, i);

	dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
pool"); 1862 1863 dma_pool_destroy(xhci->small_streams_pool); 1864 xhci->small_streams_pool = NULL; 1865 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 1866 "Freed small stream array pool"); 1867 1868 dma_pool_destroy(xhci->medium_streams_pool); 1869 xhci->medium_streams_pool = NULL; 1870 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 1871 "Freed medium stream array pool"); 1872 1873 if (xhci->dcbaa) 1874 dma_free_coherent(dev, sizeof(*xhci->dcbaa), 1875 xhci->dcbaa, xhci->dcbaa->dma); 1876 xhci->dcbaa = NULL; 1877 1878 scratchpad_free(xhci); 1879 1880 if (!xhci->rh_bw) 1881 goto no_bw; 1882 1883 for (i = 0; i < num_ports; i++) { 1884 struct xhci_tt_bw_info *tt, *n; 1885 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { 1886 list_del(&tt->tt_list); 1887 kfree(tt); 1888 } 1889 } 1890 1891 no_bw: 1892 xhci->cmd_ring_reserved_trbs = 0; 1893 xhci->num_usb2_ports = 0; 1894 xhci->num_usb3_ports = 0; 1895 xhci->num_active_eps = 0; 1896 kfree(xhci->usb2_ports); 1897 kfree(xhci->usb3_ports); 1898 kfree(xhci->port_array); 1899 kfree(xhci->rh_bw); 1900 kfree(xhci->ext_caps); 1901 1902 xhci->usb2_ports = NULL; 1903 xhci->usb3_ports = NULL; 1904 xhci->port_array = NULL; 1905 xhci->rh_bw = NULL; 1906 xhci->ext_caps = NULL; 1907 1908 xhci->page_size = 0; 1909 xhci->page_shift = 0; 1910 xhci->bus_state[0].bus_suspended = 0; 1911 xhci->bus_state[1].bus_suspended = 0; 1912 } 1913 1914 static int xhci_test_trb_in_td(struct xhci_hcd *xhci, 1915 struct xhci_segment *input_seg, 1916 union xhci_trb *start_trb, 1917 union xhci_trb *end_trb, 1918 dma_addr_t input_dma, 1919 struct xhci_segment *result_seg, 1920 char *test_name, int test_number) 1921 { 1922 unsigned long long start_dma; 1923 unsigned long long end_dma; 1924 struct xhci_segment *seg; 1925 1926 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); 1927 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); 1928 1929 seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false); 1930 if (seg != result_seg) { 1931 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", 1932 test_name, test_number); 1933 xhci_warn(xhci, "Tested TRB math w/ seg %p and " 1934 "input DMA 0x%llx\n", 1935 input_seg, 1936 (unsigned long long) input_dma); 1937 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " 1938 "ending TRB %p (0x%llx DMA)\n", 1939 start_trb, start_dma, 1940 end_trb, end_dma); 1941 xhci_warn(xhci, "Expected seg %p, got seg %p\n", 1942 result_seg, seg); 1943 trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, 1944 true); 1945 return -1; 1946 } 1947 return 0; 1948 } 1949 1950 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. 

/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
{
	struct {
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} simple_test_vector[] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment *input_seg;
		union xhci_trb *start_trb;
		union xhci_trb *end_trb;
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} complex_test_vector[] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write event ring dequeue pointer, preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}
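
/*
 * Illustrative note on the masking above (ERDP layout per the xHCI spec):
 * the low bits covered by ERST_PTR_MASK hold the dequeue ERST segment index
 * and the Event Handler Busy (EHB) bit, not pointer bits.  Since EHB is
 * RW1C, writing it back as 0 (the effect of "temp &= ~ERST_EHB") leaves the
 * hardware bit untouched, while the 16-byte-aligned dequeue pointer is ORed
 * into the remaining bits.
 */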
"Valid values are ‘1’ to MaxPorts" */ 2135 return; 2136 2137 rhub->psi_count = XHCI_EXT_PORT_PSIC(temp); 2138 if (rhub->psi_count) { 2139 rhub->psi = kcalloc(rhub->psi_count, sizeof(*rhub->psi), 2140 GFP_KERNEL); 2141 if (!rhub->psi) 2142 rhub->psi_count = 0; 2143 2144 rhub->psi_uid_count++; 2145 for (i = 0; i < rhub->psi_count; i++) { 2146 rhub->psi[i] = readl(addr + 4 + i); 2147 2148 /* count unique ID values, two consecutive entries can 2149 * have the same ID if link is assymetric 2150 */ 2151 if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) != 2152 XHCI_EXT_PORT_PSIV(rhub->psi[i - 1]))) 2153 rhub->psi_uid_count++; 2154 2155 xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n", 2156 XHCI_EXT_PORT_PSIV(rhub->psi[i]), 2157 XHCI_EXT_PORT_PSIE(rhub->psi[i]), 2158 XHCI_EXT_PORT_PLT(rhub->psi[i]), 2159 XHCI_EXT_PORT_PFD(rhub->psi[i]), 2160 XHCI_EXT_PORT_LP(rhub->psi[i]), 2161 XHCI_EXT_PORT_PSIM(rhub->psi[i])); 2162 } 2163 } 2164 /* cache usb2 port capabilities */ 2165 if (major_revision < 0x03 && xhci->num_ext_caps < max_caps) 2166 xhci->ext_caps[xhci->num_ext_caps++] = temp; 2167 2168 /* Check the host's USB2 LPM capability */ 2169 if ((xhci->hci_version == 0x96) && (major_revision != 0x03) && 2170 (temp & XHCI_L1C)) { 2171 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2172 "xHCI 0.96: support USB2 software lpm"); 2173 xhci->sw_lpm_support = 1; 2174 } 2175 2176 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) { 2177 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2178 "xHCI 1.0: support USB2 software lpm"); 2179 xhci->sw_lpm_support = 1; 2180 if (temp & XHCI_HLC) { 2181 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2182 "xHCI 1.0: support USB2 hardware lpm"); 2183 xhci->hw_lpm_support = 1; 2184 } 2185 } 2186 2187 port_offset--; 2188 for (i = port_offset; i < (port_offset + port_count); i++) { 2189 /* Duplicate entry. Ignore the port if the revisions differ. */ 2190 if (xhci->port_array[i] != 0) { 2191 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p," 2192 " port %u\n", addr, i); 2193 xhci_warn(xhci, "Port was marked as USB %u, " 2194 "duplicated as USB %u\n", 2195 xhci->port_array[i], major_revision); 2196 /* Only adjust the roothub port counts if we haven't 2197 * found a similar duplicate. 2198 */ 2199 if (xhci->port_array[i] != major_revision && 2200 xhci->port_array[i] != DUPLICATE_ENTRY) { 2201 if (xhci->port_array[i] == 0x03) 2202 xhci->num_usb3_ports--; 2203 else 2204 xhci->num_usb2_ports--; 2205 xhci->port_array[i] = DUPLICATE_ENTRY; 2206 } 2207 /* FIXME: Should we disable the port? */ 2208 continue; 2209 } 2210 xhci->port_array[i] = major_revision; 2211 if (major_revision == 0x03) 2212 xhci->num_usb3_ports++; 2213 else 2214 xhci->num_usb2_ports++; 2215 } 2216 /* FIXME: Should we disable ports not in the Extended Capabilities? */ 2217 } 2218 2219 /* 2220 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that 2221 * specify what speeds each port is supposed to be. We can't count on the port 2222 * speed bits in the PORTSC register being correct until a device is connected, 2223 * but we need to set up the two fake roothubs with the correct number of USB 2224 * 3.0 and USB 2.0 ports at host controller initialization time. 

/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
 * specify which speed each port is supposed to run at.  We can't count on the
 * port speed bits in the PORTSC register being correct until a device is
 * connected, but we need to set up the two fake roothubs with the correct
 * number of USB 3.0 and USB 2.0 ports at host controller initialization time.
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	void __iomem *base;
	u32 offset;
	unsigned int num_ports;
	int i, j, port_index;
	int cap_count = 0;
	u32 cap_start;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kcalloc(num_ports, sizeof(*xhci->port_array), flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kcalloc(num_ports, sizeof(*xhci->rh_bw), flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	base = &xhci->cap_regs->hc_capbase;

	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
	if (!cap_start) {
		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
		return -ENODEV;
	}

	offset = cap_start;
	/* count extended protocol capability entries for later caching */
	while (offset) {
		cap_count++;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}

	xhci->ext_caps = kcalloc(cap_count, sizeof(*xhci->ext_caps), flags);
	if (!xhci->ext_caps)
		return -ENOMEM;

	offset = cap_start;

	while (offset) {
		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
		if (xhci->num_usb2_ports + xhci->num_usb3_ports == num_ports)
			break;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
			xhci->num_usb2_ports, xhci->num_usb3_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->num_usb3_ports > USB_SS_MAXPORTS) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 3.0 roothub ports to %u.",
				USB_SS_MAXPORTS);
		xhci->num_usb3_ports = USB_SS_MAXPORTS;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 2.0 roothub ports to %u.",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc_array(xhci->num_usb2_ports,
					sizeof(*xhci->usb2_ports), flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"USB 2.0 port at index %u, addr = %p",
					i, xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc_array(xhci->num_usb3_ports,
					sizeof(*xhci->usb3_ports), flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
						"USB 3.0 port at index %u, addr = %p",
						i, xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}
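
/*
 * Illustrative note (register layout per the xHCI operational register map):
 * each port gets NUM_PORT_REGS 32-bit operational registers starting at
 * port_status_base, so the PORTSC of the (i+1)-th port lives at
 *
 *	&xhci->op_regs->port_status_base + NUM_PORT_REGS * i
 *
 * which is exactly the address cached in usb2_ports[]/usb3_ports[] above.
 */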

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	unsigned int val, val2;
	u64 val_64;
	u32 page_size, temp;
	int i, ret;

	INIT_LIST_HEAD(&xhci->cmd_list);

	/* init command timeout work */
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);

	page_size = readl(&xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size register = 0x%x", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"HCD page size set to %iK", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// xHC can handle at most %d device slots.", val);
	val2 = readl(&xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting Max device slots reg = 0x%x.", val);
	writel(val, &xhci->op_regs->config_reg);

	/*
	 * xHCI section 5.4.6 - the Device Context Base Address Array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			flags);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof(*xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Device context base array address = 0x%llx (DMA), %p (virt)",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of dma addresses in the trb_address_map radix tree needs
	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */

	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%016llx", val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);

	xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = readl(&xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Doorbell array is located at offset 0x%x from cap regs base addr",
			val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];
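
	/*
	 * Illustrative sketch (DB_VALUE() and the doorbell array layout are
	 * defined in xhci.h): once xhci->dba is set, ringing the doorbell of
	 * endpoint ep_index for slot slot_id amounts to
	 *
	 *	writel(DB_VALUE(ep_index, stream_id),
	 *	       &xhci->dba->doorbell[slot_id]);
	 *
	 * which is how the transfer paths kick the host controller.
	 */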

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
					   0, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci) < 0)
		goto fail;

	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
	if (ret)
		goto fail;

	/* set ERST count with the number of entries in the segment table */
	val = readl(&xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
			val);
	writel(val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST base address for ir_set 0 = 0x%llx",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set 0.");

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	for (i = 0; i < MAX_HC_SLOTS; i++)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; i++) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
		/* Only the USB 2.0 completions will ever be used. */
		init_completion(&xhci->bus_state[1].rexit_done[i]);
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = readl(&xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	writel(temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}
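
/*
 * Usage sketch (illustrative, not a real call site): xhci_mem_init() is
 * meant to run once during controller bring-up, after the hardware has been
 * halted and reset, and xhci_mem_cleanup() undoes it on teardown or when
 * init fails partway:
 *
 *	ret = xhci_mem_init(xhci, GFP_KERNEL);
 *	if (ret)
 *		return ret;	// memory already freed via the fail path
 *
 * Note the fail label above halts and resets the controller before freeing,
 * so the hardware can't DMA into memory that is being torn down.
 */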