1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * EHCI Host Controller Driver (EHCI) 31 * 32 * The EHCI driver is a software driver which interfaces to the Universal 33 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to 34 * the Host Controller is defined by the EHCI Host Controller Interface. 35 * 36 * This module contains the main EHCI driver code which handles all USB 37 * transfers, bandwidth allocations and other general functionalities. 
 */

#include <sys/usb/hcd/ehci/ehcid.h>
#include <sys/usb/hcd/ehci/ehci_isoch.h>
#include <sys/usb/hcd/ehci/ehci_xfer.h>

/* Pointer to the per-instance soft state structure */
extern void *ehci_statep;

extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);

/* Bit mask of VIA VT62x2 workarounds enabled (see ehci.conf) */
extern uint_t ehci_vt62x2_workaround;

/* Adjustable variables for the size of the QH and QTD pools */
int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;

/*
 * Initialize the values which the order of 32ms intr qh are executed
 * by the host controller in the lattice tree (bit-reversed leaf order).
 */
static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
	{0x00, 0x10, 0x08, 0x18,
	0x04, 0x14, 0x0c, 0x1c,
	0x02, 0x12, 0x0a, 0x1a,
	0x06, 0x16, 0x0e, 0x1e,
	0x01, 0x11, 0x09, 0x19,
	0x05, 0x15, 0x0d, 0x1d,
	0x03, 0x13, 0x0b, 0x1b,
	0x07, 0x17, 0x0f, 0x1f};

/*
 * Initialize the values which are used to calculate start split mask
 * for the low/full/high speed interrupt and isochronous endpoints.
 * Each entry is an 8-bit mask of micro-frames within a 1ms frame.
 */
static uint_t ehci_start_split_mask[15] = {
	/*
	 * For high/full/low speed usb devices. For high speed
	 * device with polling interval greater than or equal
	 * to 8us (125us).
	 */
	0x01,	/* 00000001 */
	0x02,	/* 00000010 */
	0x04,	/* 00000100 */
	0x08,	/* 00001000 */
	0x10,	/* 00010000 */
	0x20,	/* 00100000 */
	0x40,	/* 01000000 */
	0x80,	/* 10000000 */

	/* Only for high speed devices with polling interval 4us */
	0x11,	/* 00010001 */
	0x22,	/* 00100010 */
	0x44,	/* 01000100 */
	0x88,	/* 10001000 */

	/* Only for high speed devices with polling interval 2us */
	0x55,	/* 01010101 */
	0xaa,	/* 10101010 */

	/* Only for high speed devices with polling interval 1us */
	0xff	/* 11111111 */
};

/*
 * Initialize the values which are used to calculate complete split mask
 * for the low/full speed interrupt and isochronous endpoints.
 */
static uint_t ehci_intr_complete_split_mask[7] = {
	/* Only full/low speed devices */
	0x1c,	/* 00011100 */
	0x38,	/* 00111000 */
	0x70,	/* 01110000 */
	0xe0,	/* 11100000 */
	0x00,	/* Need FSTN feature */
	0x00,	/* Need FSTN feature */
	0x00	/* Need FSTN feature */
};


/*
 * EHCI Internal Function Prototypes
 */

/* Host Controller Driver (HCD) initialization functions */
void		ehci_set_dma_attributes(ehci_state_t *ehcip);
int		ehci_allocate_pools(ehci_state_t *ehcip);
void		ehci_decode_ddi_dma_addr_bind_handle_result(
				ehci_state_t *ehcip,
				int result);
int		ehci_map_regs(ehci_state_t *ehcip);
int		ehci_register_intrs_and_init_mutex(
				ehci_state_t *ehcip);
int		ehci_init_ctlr(ehci_state_t *ehcip);
static int	ehci_take_control(ehci_state_t *ehcip);
static int	ehci_init_periodic_frame_lst_table(
				ehci_state_t *ehcip);
static void	ehci_build_interrupt_lattice(
				ehci_state_t *ehcip);
usba_hcdi_ops_t	*ehci_alloc_hcdi_ops(ehci_state_t *ehcip);

/* Host Controller Driver (HCD) deinitialization functions */
int		ehci_cleanup(ehci_state_t *ehcip);
int		ehci_cpr_suspend(ehci_state_t *ehcip);
int		ehci_cpr_resume(ehci_state_t *ehcip);

/* Bandwidth Allocation functions */
int		ehci_allocate_bandwidth(ehci_state_t *ehcip,
				usba_pipe_handle_data_t *ph,
				uint_t *pnode,
				uchar_t *smask,
				uchar_t *cmask);
static int	ehci_allocate_high_speed_bandwidth(
				ehci_state_t *ehcip,
				usba_pipe_handle_data_t *ph,
				uint_t *hnode,
				uchar_t *smask,
				uchar_t *cmask);
static int	ehci_allocate_classic_tt_bandwidth(
				ehci_state_t *ehcip,
				usba_pipe_handle_data_t *ph,
				uint_t pnode);
void		ehci_deallocate_bandwidth(ehci_state_t *ehcip,
				usba_pipe_handle_data_t *ph,
				uint_t pnode,
				uchar_t smask,
				uchar_t cmask);
static void	ehci_deallocate_high_speed_bandwidth(
				ehci_state_t *ehcip,
				usba_pipe_handle_data_t *ph,
				uint_t hnode,
				uchar_t smask,
				uchar_t cmask);
static void	ehci_deallocate_classic_tt_bandwidth(
				ehci_state_t *ehcip,
				usba_pipe_handle_data_t *ph,
				uint_t pnode);
static int	ehci_compute_high_speed_bandwidth(
				ehci_state_t *ehcip,
				usb_ep_descr_t *endpoint,
				usb_port_status_t port_status,
				uint_t *sbandwidth,
				uint_t *cbandwidth);
static int	ehci_compute_classic_bandwidth(
				usb_ep_descr_t *endpoint,
				usb_port_status_t port_status,
				uint_t *bandwidth);
int		ehci_adjust_polling_interval(
				ehci_state_t *ehcip,
				usb_ep_descr_t *endpoint,
				usb_port_status_t port_status);
static int	ehci_adjust_high_speed_polling_interval(
				ehci_state_t *ehcip,
				usb_ep_descr_t *endpoint);
static uint_t	ehci_lattice_height(uint_t interval);
static uint_t	ehci_lattice_parent(uint_t node);
static uint_t	ehci_find_periodic_node(
				uint_t leaf,
				int interval);
static uint_t	ehci_leftmost_leaf(uint_t node,
				uint_t height);
static uint_t	ehci_pow_2(uint_t x);
static uint_t	ehci_log_2(uint_t x);
static int	ehci_find_bestfit_hs_mask(
				ehci_state_t *ehcip,
				uchar_t *smask,
				uint_t *pnode,
				usb_ep_descr_t *endpoint,
				uint_t bandwidth,
				int interval);
static int	ehci_find_bestfit_ls_intr_mask(
				ehci_state_t *ehcip,
				uchar_t *smask,
				uchar_t *cmask,
				uint_t *pnode,
				uint_t sbandwidth,
				uint_t cbandwidth,
				int interval);
static int	ehci_find_bestfit_sitd_in_mask(
				ehci_state_t *ehcip,
				uchar_t *smask,
				uchar_t *cmask,
				uint_t *pnode,
				uint_t sbandwidth,
				uint_t cbandwidth,
				int interval);
static int	ehci_find_bestfit_sitd_out_mask(
				ehci_state_t *ehcip,
				uchar_t *smask,
				uint_t *pnode,
				uint_t sbandwidth,
				int interval);
static uint_t	ehci_calculate_bw_availability_mask(
				ehci_state_t *ehcip,
				uint_t bandwidth,
				int leaf,
				int leaf_count,
				uchar_t *bw_mask);
static void	ehci_update_bw_availability(
				ehci_state_t *ehcip,
				int bandwidth,
				int leftmost_leaf,
				int leaf_count,
				uchar_t mask);

/* Miscellaneous functions */
ehci_state_t	*ehci_obtain_state(
				dev_info_t *dip);
int		ehci_state_is_operational(
				ehci_state_t *ehcip);
int		ehci_do_soft_reset(
				ehci_state_t *ehcip);
usb_req_attrs_t	ehci_get_xfer_attrs(ehci_state_t *ehcip,
				ehci_pipe_private_t *pp,
				ehci_trans_wrapper_t *tw);
usb_frame_number_t ehci_get_current_frame_number(
				ehci_state_t *ehcip);
static void	ehci_cpr_cleanup(
				ehci_state_t *ehcip);
int		ehci_wait_for_sof(
				ehci_state_t *ehcip);
void		ehci_toggle_scheduler(
				ehci_state_t *ehcip);
void		ehci_print_caps(ehci_state_t *ehcip);
void		ehci_print_regs(ehci_state_t *ehcip);
void		ehci_print_qh(ehci_state_t *ehcip,
				ehci_qh_t *qh);
void		ehci_print_qtd(ehci_state_t *ehcip,
				ehci_qtd_t *qtd);
void		ehci_create_stats(ehci_state_t *ehcip);
void		ehci_destroy_stats(ehci_state_t *ehcip);
void		ehci_do_intrs_stats(ehci_state_t *ehcip,
				int val);
void		ehci_do_byte_stats(ehci_state_t *ehcip,
				size_t len,
				uint8_t attr,
				uint8_t addr);

/*
 * Host Controller Driver (HCD) initialization functions
 */

/*
 * ehci_set_dma_attributes:
 *
 * Set the limits in the DMA attributes structure. Most of the values used
 * in the DMA limit structures are the default values as specified by the
 * Writing PCI device drivers document.
286 */ 287 void 288 ehci_set_dma_attributes(ehci_state_t *ehcip) 289 { 290 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 291 "ehci_set_dma_attributes:"); 292 293 /* Initialize the DMA attributes */ 294 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0; 295 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull; 296 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull; 297 298 /* 32 bit addressing */ 299 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX; 300 301 /* Byte alignment */ 302 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 303 304 /* 305 * Since PCI specification is byte alignment, the 306 * burst size field should be set to 1 for PCI devices. 307 */ 308 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1; 309 310 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1; 311 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER; 312 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull; 313 ehcip->ehci_dma_attr.dma_attr_sgllen = 1; 314 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR; 315 ehcip->ehci_dma_attr.dma_attr_flags = 0; 316 } 317 318 319 /* 320 * ehci_allocate_pools: 321 * 322 * Allocate the system memory for the Endpoint Descriptor (QH) and for the 323 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned 324 * to a 16 byte boundary. 
325 */ 326 int 327 ehci_allocate_pools(ehci_state_t *ehcip) 328 { 329 ddi_device_acc_attr_t dev_attr; 330 size_t real_length; 331 int result; 332 uint_t ccount; 333 int i; 334 335 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 336 "ehci_allocate_pools:"); 337 338 /* The host controller will be little endian */ 339 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 340 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 341 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 342 343 /* Byte alignment */ 344 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT; 345 346 /* Allocate the QTD pool DMA handle */ 347 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr, 348 DDI_DMA_SLEEP, 0, 349 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) { 350 351 goto failure; 352 } 353 354 /* Allocate the memory for the QTD pool */ 355 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle, 356 ehci_qtd_pool_size * sizeof (ehci_qtd_t), 357 &dev_attr, 358 DDI_DMA_CONSISTENT, 359 DDI_DMA_SLEEP, 360 0, 361 (caddr_t *)&ehcip->ehci_qtd_pool_addr, 362 &real_length, 363 &ehcip->ehci_qtd_pool_mem_handle)) { 364 365 goto failure; 366 } 367 368 /* Map the QTD pool into the I/O address space */ 369 result = ddi_dma_addr_bind_handle( 370 ehcip->ehci_qtd_pool_dma_handle, 371 NULL, 372 (caddr_t)ehcip->ehci_qtd_pool_addr, 373 real_length, 374 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 375 DDI_DMA_SLEEP, 376 NULL, 377 &ehcip->ehci_qtd_pool_cookie, 378 &ccount); 379 380 bzero((void *)ehcip->ehci_qtd_pool_addr, 381 ehci_qtd_pool_size * sizeof (ehci_qtd_t)); 382 383 /* Process the result */ 384 if (result == DDI_DMA_MAPPED) { 385 /* The cookie count should be 1 */ 386 if (ccount != 1) { 387 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 388 "ehci_allocate_pools: More than 1 cookie"); 389 390 goto failure; 391 } 392 } else { 393 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 394 "ehci_allocate_pools: Result = %d", result); 395 396 
ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result); 397 398 goto failure; 399 } 400 401 /* 402 * DMA addresses for QTD pools are bound 403 */ 404 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND; 405 406 /* Initialize the QTD pool */ 407 for (i = 0; i < ehci_qtd_pool_size; i ++) { 408 Set_QTD(ehcip->ehci_qtd_pool_addr[i]. 409 qtd_state, EHCI_QTD_FREE); 410 } 411 412 /* Allocate the QTD pool DMA handle */ 413 if (ddi_dma_alloc_handle(ehcip->ehci_dip, 414 &ehcip->ehci_dma_attr, 415 DDI_DMA_SLEEP, 416 0, 417 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) { 418 419 goto failure; 420 } 421 422 /* Allocate the memory for the QH pool */ 423 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle, 424 ehci_qh_pool_size * sizeof (ehci_qh_t), 425 &dev_attr, 426 DDI_DMA_CONSISTENT, 427 DDI_DMA_SLEEP, 428 0, 429 (caddr_t *)&ehcip->ehci_qh_pool_addr, 430 &real_length, 431 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) { 432 433 goto failure; 434 } 435 436 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle, 437 NULL, 438 (caddr_t)ehcip->ehci_qh_pool_addr, 439 real_length, 440 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 441 DDI_DMA_SLEEP, 442 NULL, 443 &ehcip->ehci_qh_pool_cookie, 444 &ccount); 445 446 bzero((void *)ehcip->ehci_qh_pool_addr, 447 ehci_qh_pool_size * sizeof (ehci_qh_t)); 448 449 /* Process the result */ 450 if (result == DDI_DMA_MAPPED) { 451 /* The cookie count should be 1 */ 452 if (ccount != 1) { 453 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 454 "ehci_allocate_pools: More than 1 cookie"); 455 456 goto failure; 457 } 458 } else { 459 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result); 460 461 goto failure; 462 } 463 464 /* 465 * DMA addresses for QH pools are bound 466 */ 467 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND; 468 469 /* Initialize the QH pool */ 470 for (i = 0; i < ehci_qh_pool_size; i ++) { 471 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE); 472 } 473 474 /* Byte alignment */ 475 
ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 476 477 return (DDI_SUCCESS); 478 479 failure: 480 /* Byte alignment */ 481 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 482 483 return (DDI_FAILURE); 484 } 485 486 487 /* 488 * ehci_decode_ddi_dma_addr_bind_handle_result: 489 * 490 * Process the return values of ddi_dma_addr_bind_handle() 491 */ 492 void 493 ehci_decode_ddi_dma_addr_bind_handle_result( 494 ehci_state_t *ehcip, 495 int result) 496 { 497 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl, 498 "ehci_decode_ddi_dma_addr_bind_handle_result:"); 499 500 switch (result) { 501 case DDI_DMA_PARTIAL_MAP: 502 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 503 "Partial transfers not allowed"); 504 break; 505 case DDI_DMA_INUSE: 506 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 507 "Handle is in use"); 508 break; 509 case DDI_DMA_NORESOURCES: 510 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 511 "No resources"); 512 break; 513 case DDI_DMA_NOMAPPING: 514 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 515 "No mapping"); 516 break; 517 case DDI_DMA_TOOBIG: 518 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 519 "Object is too big"); 520 break; 521 default: 522 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 523 "Unknown dma error"); 524 } 525 } 526 527 528 /* 529 * ehci_map_regs: 530 * 531 * The Host Controller (HC) contains a set of on-chip operational registers 532 * and which should be mapped into a non-cacheable portion of the system 533 * addressable space. 
534 */ 535 int 536 ehci_map_regs(ehci_state_t *ehcip) 537 { 538 ddi_device_acc_attr_t attr; 539 uint16_t cmd_reg; 540 uint_t length; 541 542 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:"); 543 544 /* Check to make sure we have memory access */ 545 if (pci_config_setup(ehcip->ehci_dip, 546 &ehcip->ehci_config_handle) != DDI_SUCCESS) { 547 548 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 549 "ehci_map_regs: Config error"); 550 551 return (DDI_FAILURE); 552 } 553 554 /* Make sure Memory Access Enable is set */ 555 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM); 556 557 if (!(cmd_reg & PCI_COMM_MAE)) { 558 559 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 560 "ehci_map_regs: Memory base address access disabled"); 561 562 return (DDI_FAILURE); 563 } 564 565 /* The host controller will be little endian */ 566 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 567 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 568 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 569 570 /* Map in EHCI Capability registers */ 571 if (ddi_regs_map_setup(ehcip->ehci_dip, 1, 572 (caddr_t *)&ehcip->ehci_capsp, 0, 573 sizeof (ehci_caps_t), &attr, 574 &ehcip->ehci_caps_handle) != DDI_SUCCESS) { 575 576 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 577 "ehci_map_regs: Map setup error"); 578 579 return (DDI_FAILURE); 580 } 581 582 length = ddi_get8(ehcip->ehci_caps_handle, 583 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length); 584 585 /* Free the original mapping */ 586 ddi_regs_map_free(&ehcip->ehci_caps_handle); 587 588 /* Re-map in EHCI Capability and Operational registers */ 589 if (ddi_regs_map_setup(ehcip->ehci_dip, 1, 590 (caddr_t *)&ehcip->ehci_capsp, 0, 591 length + sizeof (ehci_regs_t), &attr, 592 &ehcip->ehci_caps_handle) != DDI_SUCCESS) { 593 594 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 595 "ehci_map_regs: Map setup error"); 596 597 return (DDI_FAILURE); 598 } 599 600 /* Get the pointer to EHCI 
Operational Register */ 601 ehcip->ehci_regsp = (ehci_regs_t *) 602 ((uintptr_t)ehcip->ehci_capsp + length); 603 604 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 605 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n", 606 ehcip->ehci_capsp, ehcip->ehci_regsp); 607 608 return (DDI_SUCCESS); 609 } 610 611 612 /* 613 * ehci_register_intrs_and_init_mutex: 614 * 615 * Register interrupts and initialize each mutex and condition variables 616 */ 617 int 618 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip) 619 { 620 int type, count = 0, actual, ret; 621 622 #if defined(__x86) 623 uint8_t iline; 624 #endif 625 626 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 627 "ehci_register_intrs_and_init_mutex:"); 628 629 #if defined(__x86) 630 /* 631 * Make sure that the interrupt pin is connected to the 632 * interrupt controller on x86. Interrupt line 255 means 633 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43). 634 */ 635 iline = pci_config_get8(ehcip->ehci_config_handle, 636 PCI_CONF_ILINE); 637 638 if (iline == 255) { 639 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 640 "ehci_register_intrs_and_init_mutex: " 641 "interrupt line value out of range (%d)", 642 iline); 643 644 return (DDI_FAILURE); 645 } 646 #endif /* __x86 */ 647 648 ret = ddi_intr_get_supported_types(ehcip->ehci_dip, &type); 649 650 if ((ret != DDI_SUCCESS) || (!(type & DDI_INTR_TYPE_FIXED))) { 651 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 652 "ehci_register_intrs_and_init_mutex: " 653 "Fixed type interrupt is not supported"); 654 655 return (DDI_FAILURE); 656 } 657 658 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, DDI_INTR_TYPE_FIXED, &count); 659 660 /* 661 * Fixed interrupts can only have one interrupt. Check to make 662 * sure that number of supported interrupts and number of 663 * available interrupts are both equal to 1. 
664 */ 665 if ((ret != DDI_SUCCESS) || (count != 1)) { 666 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 667 "ehci_register_intrs_and_init_mutex: " 668 "no fixed interrupts"); 669 670 return (DDI_FAILURE); 671 } 672 673 ehcip->ehci_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP); 674 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable, 675 DDI_INTR_TYPE_FIXED, 0, count, &actual, 0); 676 677 if ((ret != DDI_SUCCESS) || (actual != 1)) { 678 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 679 "ehci_register_intrs_and_init_mutex: " 680 "ddi_intr_alloc() failed 0x%x", ret); 681 682 kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t)); 683 684 return (DDI_FAILURE); 685 } 686 687 /* Sanity check that count and avail are the same. */ 688 ASSERT(count == actual); 689 690 if (ddi_intr_get_pri(ehcip->ehci_htable[0], &ehcip->ehci_intr_pri)) { 691 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 692 "ehci_register_intrs_and_init_mutex: " 693 "ddi_intr_get_pri() failed"); 694 695 (void) ddi_intr_free(ehcip->ehci_htable[0]); 696 kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t)); 697 698 return (DDI_FAILURE); 699 } 700 701 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 702 "Supported Interrupt priority = 0x%x", ehcip->ehci_intr_pri); 703 704 /* Test for high level mutex */ 705 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) { 706 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 707 "ehci_register_intrs_and_init_mutex: " 708 "Hi level interrupt not supported"); 709 710 (void) ddi_intr_free(ehcip->ehci_htable[0]); 711 kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t)); 712 713 return (DDI_FAILURE); 714 } 715 716 /* Initialize the mutex */ 717 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER, 718 (void *)(uintptr_t)ehcip->ehci_intr_pri); 719 720 if (ddi_intr_add_handler(ehcip->ehci_htable[0], 721 (ddi_intr_handler_t *)ehci_intr, (caddr_t)ehcip, NULL) != 722 DDI_SUCCESS) { 723 USB_DPRINTF_L2(PRINT_MASK_ATTA, 
ehcip->ehci_log_hdl, 724 "ehci_register_intrs_and_init_mutex: " 725 "ddi_intr_add_handler() failed"); 726 727 mutex_destroy(&ehcip->ehci_int_mutex); 728 (void) ddi_intr_free(ehcip->ehci_htable[0]); 729 kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t)); 730 731 return (DDI_FAILURE); 732 } 733 734 if (ddi_intr_enable(ehcip->ehci_htable[0]) != DDI_SUCCESS) { 735 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 736 "ehci_register_intrs_and_init_mutex: " 737 "ddi_intr_enable() failed"); 738 739 (void) ddi_intr_remove_handler(ehcip->ehci_htable[0]); 740 mutex_destroy(&ehcip->ehci_int_mutex); 741 (void) ddi_intr_free(ehcip->ehci_htable[0]); 742 kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t)); 743 744 return (DDI_FAILURE); 745 } 746 747 /* Create prototype for advance on async schedule */ 748 cv_init(&ehcip->ehci_async_schedule_advance_cv, 749 NULL, CV_DRIVER, NULL); 750 751 return (DDI_SUCCESS); 752 } 753 754 755 /* 756 * ehci_init_ctlr: 757 * 758 * Initialize the Host Controller (HC). 759 */ 760 int 761 ehci_init_ctlr(ehci_state_t *ehcip) 762 { 763 int revision; 764 uint16_t cmd_reg; 765 clock_t sof_time_wait; 766 int abort_on_BIOS_take_over_failure; 767 768 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:"); 769 770 /* Take control from the BIOS */ 771 if (ehci_take_control(ehcip) != USB_SUCCESS) { 772 773 /* read .conf file properties */ 774 abort_on_BIOS_take_over_failure = 775 ddi_prop_get_int(DDI_DEV_T_ANY, 776 ehcip->ehci_dip, DDI_PROP_DONTPASS, 777 "abort-on-BIOS-take-over-failure", 0); 778 779 if (abort_on_BIOS_take_over_failure) { 780 781 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 782 "Unable to take control from BIOS."); 783 784 return (DDI_FAILURE); 785 } 786 787 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 788 "Unable to take control from BIOS. 
Failure is ignored."); 789 } 790 791 /* set Memory Master Enable */ 792 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM); 793 cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME); 794 pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg); 795 796 /* Reset the EHCI host controller */ 797 Set_OpReg(ehci_command, 798 Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET); 799 800 /* Wait 10ms for reset to complete */ 801 drv_usecwait(EHCI_RESET_TIMEWAIT); 802 803 ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED); 804 805 /* Verify the version number */ 806 revision = Get_16Cap(ehci_version); 807 808 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 809 "ehci_init_ctlr: Revision 0x%x", revision); 810 811 /* 812 * EHCI driver supports EHCI host controllers compliant to 813 * 0.95 and higher revisions of EHCI specifications. 814 */ 815 if (revision < EHCI_REVISION_0_95) { 816 817 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 818 "Revision 0x%x is not supported", revision); 819 820 return (DDI_FAILURE); 821 } 822 823 if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) { 824 825 /* Get the ehci chip vendor and device id */ 826 ehcip->ehci_vendor_id = pci_config_get16( 827 ehcip->ehci_config_handle, PCI_CONF_VENID); 828 ehcip->ehci_device_id = pci_config_get16( 829 ehcip->ehci_config_handle, PCI_CONF_DEVID); 830 ehcip->ehci_rev_id = pci_config_get8( 831 ehcip->ehci_config_handle, PCI_CONF_REVID); 832 833 /* Initialize the Frame list base address area */ 834 if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) { 835 836 return (DDI_FAILURE); 837 } 838 839 /* 840 * For performance reasons, do not insert anything into the 841 * asynchronous list or activate the asynch list schedule until 842 * there is a valid QH. 
843 */ 844 ehcip->ehci_head_of_async_sched_list = NULL; 845 846 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) && 847 (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) { 848 /* 849 * The driver is unable to reliably stop the asynch 850 * list schedule on VIA VT6202 controllers, so we 851 * always keep a dummy QH on the list. 852 */ 853 ehci_qh_t *dummy_async_qh = 854 ehci_alloc_qh(ehcip, NULL, NULL); 855 856 Set_QH(dummy_async_qh->qh_link_ptr, 857 ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) & 858 EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH)); 859 860 /* Set this QH to be the "head" of the circular list */ 861 Set_QH(dummy_async_qh->qh_ctrl, 862 Get_QH(dummy_async_qh->qh_ctrl) | 863 EHCI_QH_CTRL_RECLAIM_HEAD); 864 865 Set_QH(dummy_async_qh->qh_next_qtd, 866 EHCI_QH_NEXT_QTD_PTR_VALID); 867 Set_QH(dummy_async_qh->qh_alt_next_qtd, 868 EHCI_QH_ALT_NEXT_QTD_PTR_VALID); 869 870 ehcip->ehci_head_of_async_sched_list = dummy_async_qh; 871 ehcip->ehci_open_async_count++; 872 } 873 } 874 875 /* 876 * Check for Asynchronous schedule park capability feature. If this 877 * feature is supported, then, program ehci command register with 878 * appropriate values.. 879 */ 880 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) { 881 882 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 883 "ehci_init_ctlr: Async park mode is supported"); 884 885 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) | 886 (EHCI_CMD_ASYNC_PARK_ENABLE | 887 EHCI_CMD_ASYNC_PARK_COUNT_3))); 888 } 889 890 /* 891 * Check for programmable periodic frame list feature. If this 892 * feature is supported, then, program ehci command register with 893 * 1024 frame list value. 
894 */ 895 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) { 896 897 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 898 "ehci_init_ctlr: Variable programmable periodic " 899 "frame list is supported"); 900 901 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) | 902 EHCI_CMD_FRAME_1024_SIZE)); 903 } 904 905 /* 906 * Currently EHCI driver doesn't support 64 bit addressing. 907 * 908 * If we are using 64 bit addressing capability, then, program 909 * ehci_ctrl_segment register with 4 Gigabyte segment where all 910 * of the interface data structures are allocated. 911 */ 912 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) { 913 914 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 915 "ehci_init_ctlr: EHCI driver doesn't support " 916 "64 bit addressing"); 917 } 918 919 /* 64 bit addressing is not support */ 920 Set_OpReg(ehci_ctrl_segment, 0x00000000); 921 922 /* Turn on/off the schedulers */ 923 ehci_toggle_scheduler(ehcip); 924 925 /* 926 * Set the Periodic Frame List Base Address register with the 927 * starting physical address of the Periodic Frame List. 928 */ 929 Set_OpReg(ehci_periodic_list_base, 930 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address & 0xFFFFF000)); 931 932 /* 933 * Set ehci_interrupt to enable all interrupts except Root 934 * Hub Status change interrupt. 935 */ 936 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR | 937 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR | 938 EHCI_INTR_USB); 939 940 /* 941 * Set the desired interrupt threshold and turn on EHCI host controller. 942 */ 943 Set_OpReg(ehci_command, 944 ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) | 945 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN))); 946 947 ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN); 948 949 /* 950 * Acer Labs Inc. 
M5273 EHCI controller does not send 951 * interrupts unless the Root hub ports are routed to the EHCI 952 * host controller; so route the ports now, before we test for 953 * the presence of SOFs interrupts. 954 */ 955 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) { 956 /* Route all Root hub ports to EHCI host controller */ 957 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI); 958 } 959 960 /* 961 * VIA chips have some issues and may not work reliably. 962 * If we were bound using class pciclass,0c0320, 963 * complain, else proceed. This will allow the user 964 * to bind ehci specifically to this chip and not 965 * have the warnings 966 */ 967 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) && 968 (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name, 969 "pciclass,0c0320") == 0)) { 970 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 971 "Due to recently discovered incompatibilities"); 972 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 973 "with this USB controller, USB2.x transfer"); 974 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 975 "support has been disabled. 
This device will"); 976 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 977 "continue to function as a USB1.x controller."); 978 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 979 "If you are interested in enabling USB2.x"); 980 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 981 "support please, refer to the ehci(7D) man page."); 982 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 983 "Please also refer to www.sun.com/io for"); 984 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 985 "Solaris Ready products and to"); 986 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 987 "www.sun.com/bigadmin/hcl for additional"); 988 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 989 "compatible USB products."); 990 991 return (DDI_FAILURE); 992 } else if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) && 993 ehci_vt62x2_workaround) { 994 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 995 "Applying VIA workarounds"); 996 } 997 998 /* 999 * Get the number of clock ticks to wait. 1000 * This is based on the maximum time it takes for a frame list rollover 1001 * and maximum time wait for SOFs to begin. 1002 */ 1003 sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) + 1004 EHCI_SOF_TIMEWAIT); 1005 1006 /* Tell the ISR to broadcast ehci_async_schedule_advance_cv */ 1007 ehcip->ehci_flags |= EHCI_CV_INTR; 1008 1009 /* We need to add a delay to allow the chip time to start running */ 1010 (void) cv_timedwait(&ehcip->ehci_async_schedule_advance_cv, 1011 &ehcip->ehci_int_mutex, ddi_get_lbolt() + sof_time_wait); 1012 1013 /* 1014 * Check EHCI host controller is running, otherwise return failure. 
 */
	/*
	 * EHCI_CV_INTR was set before the cv_timedwait() above; the ISR
	 * clears it when it broadcasts ehci_async_schedule_advance_cv.
	 * If it is still set here, no SOF interrupt ever arrived.
	 */
	if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
	    (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {

		USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "No SOF interrupts have been received, this USB EHCI host"
		    "controller is unusable");

		/*
		 * Route all Root hub ports to Classic host
		 * controller, in case this is an unusable ALI M5273
		 * EHCI controller.
		 */
		if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
			Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
		}

		return (DDI_FAILURE);
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_ctlr: SOF's have started");

	/* Route all Root hub ports to EHCI host controller */
	Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);

	/* Set host controller soft state to operational */
	ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;

	return (DDI_SUCCESS);
}

/*
 * ehci_take_control:
 *
 * Handshake to take EHCI control from BIOS if necessary. Its only valid for
 * x86 machines, because sparc doesn't have a BIOS.
 * On x86 machine, the take control process includes
 *    o get the base address of the extended capability list
 *    o find out the capability for handoff synchronization in the list.
 *    o check if BIOS has owned the host controller.
 *    o set the OS Owned semaphore bit, ask the BIOS to release the ownership.
 *    o wait for a constant time and check if BIOS has relinquished control.
 *
 * Returns USB_SUCCESS when the OS owns the controller (including the cases
 * where no legacy-support capability exists or the BIOS never owned it);
 * USB_FAILURE when the BIOS refuses to relinquish ownership. On non-x86
 * platforms the whole body is compiled out and USB_SUCCESS is returned.
 */
/* ARGSUSED */
static int
ehci_take_control(ehci_state_t *ehcip)
{
#if defined(__x86)
	uint32_t extended_cap;
	uint32_t extended_cap_offset;
	uint32_t extended_cap_id;
	uint_t retry;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_take_control:");

	/*
	 * According EHCI Spec 2.2.4, get EECP base address from HCCPARAMS
	 * register.
	 */
	extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
	    EHCI_HCC_EECP_SHIFT;

	/*
	 * According EHCI Spec 2.2.4, if the extended capability offset is
	 * less than 40h then its not valid. This means we don't need to
	 * worry about BIOS handoff.
	 */
	if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: Hardware doesn't support legacy.");

		goto success;
	}

	/*
	 * According EHCI Spec 2.1.7, A zero offset indicates the
	 * end of the extended capability list.
	 *
	 * NOTE: extended_cap_id is always assigned before it is tested
	 * below, because the offset is known to be >= 40h here, so this
	 * loop executes at least once.
	 */
	while (extended_cap_offset) {

		/* Get the extended capability value. */
		extended_cap = pci_config_get32(ehcip->ehci_config_handle,
		    extended_cap_offset);

		/* Get the capability ID */
		extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
		    EHCI_EX_CAP_ID_SHIFT;

		/* Check if the card support legacy */
		if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
			break;
		}

		/* Get the offset of the next capability */
		extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
		    EHCI_EX_CAP_NEXT_PTR_SHIFT;
	}

	/*
	 * Unable to find legacy support in hardware's extended capability list.
	 * This means we don't need to worry about BIOS handoff.
	 */
	if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: Hardware doesn't support legacy");

		goto success;
	}

	/* Check if BIOS has owned it. */
	if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: BIOS does not own EHCI");

		goto success;
	}

	/*
	 * According EHCI Spec 5.1, The OS driver initiates an ownership
	 * request by setting the OS Owned semaphore to a one. The OS
	 * waits for the BIOS Owned bit to go to a zero before attempting
	 * to use the EHCI controller. The time that OS must wait for BIOS
	 * to respond to the request for ownership is beyond the scope of
	 * this specification.
	 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
	 * for BIOS to release the ownership.
	 */
	extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
	pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
	    extended_cap);

	for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {

		/* wait a special interval */
		delay(drv_usectohz(EHCI_TAKEOVER_DELAY));

		/* Check to see if the BIOS has released the ownership */
		extended_cap = pci_config_get32(
		    ehcip->ehci_config_handle, extended_cap_offset);

		if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA,
			    ehcip->ehci_log_hdl,
			    "ehci_take_control: BIOS has released "
			    "the ownership. retry = %d", retry);

			goto success;
		}

	}

	USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_take_control: take control from BIOS failed.");

	return (USB_FAILURE);

success:

#endif	/* __x86 */
	return (USB_SUCCESS);
}


/*
 * ehci_init_periodic_frame_list_table :
 *
 * Allocate the system memory and initialize Host Controller
 * Periodic Frame List table area. The starting of the Periodic
 * Frame List Table area must be 4096 byte aligned.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE on any allocation or DMA bind
 * error. Must be called with ehci_int_mutex held.
 */
static int
ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
{
	ddi_device_acc_attr_t	dev_attr;
	size_t			real_length;
	uint_t			ccount;
	int			result;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/*
	 * Force the required 4K restrictive alignment. The shared DMA
	 * attributes are temporarily modified and restored to the default
	 * alignment on both the success and failure paths below.
	 */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;

	/* Create space for the Periodic Frame List */
	if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
	    DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {

		goto failure;
	}

	/*
	 * Implicit check: ddi_dma_mem_alloc() returns DDI_SUCCESS (zero)
	 * on success, so any non-zero return takes the failure path.
	 */
	if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
	    sizeof (ehci_periodic_frame_list_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
	    &real_length, &ehcip->ehci_pflt_mem_handle)) {

		goto failure;
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table: "
	    "Real length %lu", real_length);

	/* Map the whole Periodic Frame List into the I/O address space */
	result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
	    NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
	    real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);

	if (result == DDI_DMA_MAPPED) {
		/*
		 * The cookie count should be 1, because the frame list
		 * must be physically contiguous for the controller.
		 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_init_periodic_frame_lst_table: "
			    "More than 1 cookie");

			goto failure;
		}
	} else {
		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

		goto failure;
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
	    (void *)ehcip->ehci_periodic_frame_list_tablep,
	    ehcip->ehci_pflt_cookie.dmac_address);

	/*
	 * DMA addresses for Periodic Frame List are bound.
	 * ehci_cleanup() consults this flag to decide whether to unbind.
	 */
	ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;

	bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);

	/* Initialize the Periodic Frame List */
	ehci_build_interrupt_lattice(ehcip);

	/* Reset Byte Alignment to Default */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_SUCCESS);
failure:
	/*
	 * Byte alignment restored; any handle/memory allocated above is
	 * intentionally not released here — ehci_cleanup() frees the pflt
	 * handles on the attach-failure path.
	 */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_FAILURE);
}


/*
 * ehci_build_interrupt_lattice:
 *
 * Construct the interrupt lattice tree using static Endpoint Descriptors
 * (QH). This interrupt lattice tree will have total of 32 interrupt QH
 * lists and the Host Controller (HC) processes one interrupt QH list in
 * every frame. The Host Controller traverses the periodic schedule by
 * constructing an array offset reference from the Periodic List Base Address
 * register and bits 12 to 3 of Frame Index register. It fetches the element
 * and begins traversing the graph of linked schedule data structures.
 */
static void
ehci_build_interrupt_lattice(ehci_state_t *ehcip)
{
	ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr;
	/*
	 * NOTE: this local array deliberately shadows the file-scope
	 * ehci_index[] table; it is a scratch array (one entry per
	 * periodic frame list slot) used to compute the bit-reversed
	 * order in which the frame list entries point into the tree.
	 */
	ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
	ehci_periodic_frame_list_t *periodic_frame_list =
	    ehcip->ehci_periodic_frame_list_tablep;
	ushort_t *temp, num_of_nodes;
	uintptr_t addr;
	int i, j, k;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_build_interrupt_lattice:");

	/*
	 * Reserve the first 63 Endpoint Descriptor (QH) structures
	 * in the pool as static endpoints & these are required for
	 * constructing interrupt lattice tree.
	 */
	for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
		Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
		Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
		Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
		Set_QH(list_array[i].qh_alt_next_qtd,
		    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
	}

	/*
	 * Make sure that last Endpoint on the periodic frame list terminates
	 * periodic schedule.
	 */
	Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);

	/*
	 * Build the interrupt lattice tree: both children of node i
	 * (nodes 2i+1 and 2i+2 in the heap-style layout) link up to
	 * their parent, so traversal converges toward the root.
	 */
	for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
		/*
		 * The next pointer in the host controller endpoint
		 * descriptor must contain an iommu address. Calculate
		 * the offset into the cpu address and add this to the
		 * starting iommu address.
		 */
		addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);

		Set_QH(list_array[2*i + 1].qh_link_ptr,
		    addr | EHCI_QH_LINK_REF_QH);
		Set_QH(list_array[2*i + 2].qh_link_ptr,
		    addr | EHCI_QH_LINK_REF_QH);
	}

	/*
	 * Build the tree bottom. 2 bytes per entry matches the ushort_t
	 * scratch array above.
	 */
	temp = (unsigned short *)
	    kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);

	num_of_nodes = 1;

	/*
	 * Initialize the values which are used for setting up head pointers
	 * for the 32ms scheduling lists which starts from the Periodic Frame
	 * List.
	 *
	 * Each pass doubles the node count: j advances twice per inner
	 * iteration (once in the loop header, once via ehci_index[j++]),
	 * emitting two children for every node of the previous pass.
	 */
	for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
		for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
			ehci_index[j++] = temp[k];
			ehci_index[j] = temp[k] + ehci_pow_2(i);
		}

		num_of_nodes *= 2;
		for (k = 0; k < num_of_nodes; k++)
			temp[k] = ehci_index[k];
	}

	kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));

	/*
	 * Initialize the interrupt list in the Periodic Frame List Table
	 * so that it points to the bottom of the tree.
	 */
	for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
		addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
		    (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));

		ASSERT(addr);

		for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
			Set_PFLT(periodic_frame_list->
			    ehci_periodic_frame_list_table[ehci_index[j++]],
			    (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
		}
	}
}


/*
 * ehci_alloc_hcdi_ops:
 *
 * The HCDI interfaces or entry points are the software interfaces used by
 * the Universal Serial Bus Driver (USBA) to access the services of the
 * Host Controller Driver (HCD). During HCD initialization, inform USBA
 * about all available HCDI interfaces or entry points.
 */
usba_hcdi_ops_t *
ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
{
	usba_hcdi_ops_t *usba_hcdi_ops;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_alloc_hcdi_ops:");

	usba_hcdi_ops = usba_alloc_hcdi_ops();

	usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;

	/* Pipe management entry points */
	usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
	usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;

	usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;

	/* Transfer entry points, one per USB transfer type */
	usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;

	usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
	    ehci_hcdi_bulk_transfer_size;

	usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
	    ehci_hcdi_pipe_stop_intr_polling;
	usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
	    ehci_hcdi_pipe_stop_isoc_polling;

	usba_hcdi_ops->usba_hcdi_get_current_frame_number =
	    ehci_hcdi_get_current_frame_number;
	usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
	    ehci_hcdi_get_max_isoc_pkts;

	/* Polled-mode (kernel debugger / console) entry points */
	usba_hcdi_ops->usba_hcdi_console_input_init =
	    ehci_hcdi_polled_input_init;
	usba_hcdi_ops->usba_hcdi_console_input_enter =
	    ehci_hcdi_polled_input_enter;
	usba_hcdi_ops->usba_hcdi_console_read =
	    ehci_hcdi_polled_read;
	usba_hcdi_ops->usba_hcdi_console_input_exit =
	    ehci_hcdi_polled_input_exit;
	usba_hcdi_ops->usba_hcdi_console_input_fini =
	    ehci_hcdi_polled_input_fini;
	return (usba_hcdi_ops);
}


/*
 * Host Controller Driver (HCD) deinitialization functions
 */

/*
 * ehci_cleanup:
 *
 * Cleanup on attach failure or detach. Tears down only the resources
 * whose EHCI_* bits are recorded in ehci_flags or whose handles are
 * non-NULL, so it is safe to call from a partially-completed attach.
 * Returns DDI_SUCCESS, or DDI_FAILURE if the root hub driver refuses
 * to unload.
 */
int
ehci_cleanup(ehci_state_t *ehcip)
{
	ehci_trans_wrapper_t *tw;
	ehci_pipe_private_t *pp;
	ehci_qtd_t *qtd;
	int i, ctrl, rval;
	int flags = ehcip->ehci_flags;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");

	if (flags & EHCI_RHREG) {
		/* Unload the root hub driver */
		if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {

			return (DDI_FAILURE);
		}
	}

	if (flags & EHCI_USBAREG) {
		/* Unregister this HCD instance with USBA */
		usba_hcdi_unregister(ehcip->ehci_dip);
	}

	if (flags & EHCI_INTR) {

		mutex_enter(&ehcip->ehci_int_mutex);

		/* Route all Root hub ports to Classic host controller */
		Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);

		/* Disable all EHCI QH list processing */
		Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
		    ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
		    EHCI_CMD_PERIODIC_SCHED_ENABLE)));

		/* Disable all EHCI interrupts */
		Set_OpReg(ehci_interrupt, 0);

		/* wait for the next SOF */
		(void) ehci_wait_for_sof(ehcip);

		/* Stop the EHCI host controller */
		Set_OpReg(ehci_command,
		    Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);

		/* Wait for sometime */
		drv_usecwait(EHCI_TIMEWAIT);

		mutex_exit(&ehcip->ehci_int_mutex);

		/* disable interrupt */
		(void) ddi_intr_disable(ehcip->ehci_htable[0]);

		/* Remove interrupt handler */
		(void) ddi_intr_remove_handler(ehcip->ehci_htable[0]);

		/* free interrupt handle */
		(void) ddi_intr_free(ehcip->ehci_htable[0]);

		/* free memory */
		kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t));
	}

	/* Unmap the EHCI registers */
	if (ehcip->ehci_caps_handle) {
		ddi_regs_map_free(&ehcip->ehci_caps_handle);
	}

	if (ehcip->ehci_config_handle) {
		pci_config_teardown(&ehcip->ehci_config_handle);
	}

	/*
	 * Free all the buffers: reclaim any transfer wrappers still
	 * attached to in-use (non-free, non-dummy) QTDs before the
	 * QTD pool itself is unbound and freed.
	 */
	if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
		for (i = 0; i < ehci_qtd_pool_size; i++) {
			qtd = &ehcip->ehci_qtd_pool_addr[i];
			ctrl = Get_QTD(ehcip->
			    ehci_qtd_pool_addr[i].qtd_state);

			if ((ctrl != EHCI_QTD_FREE) &&
			    (ctrl != EHCI_QTD_DUMMY) &&
			    (qtd->qtd_trans_wrapper)) {

				mutex_enter(&ehcip->ehci_int_mutex);

				tw = (ehci_trans_wrapper_t *)
				    EHCI_LOOKUP_ID((uint32_t)
				    Get_QTD(qtd->qtd_trans_wrapper));

				/* Obtain the pipe private structure */
				pp = tw->tw_pipe_private;

				/* Stop the transfer timer */
				ehci_stop_xfer_timer(ehcip, tw,
				    EHCI_REMOVE_XFER_ALWAYS);

				ehci_deallocate_tw(ehcip, pp, tw);

				mutex_exit(&ehcip->ehci_int_mutex);
			}
		}

		/*
		 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
		 * the handle for QTD pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_qtd_pool_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
	}

	/* Free the QTD pool */
	if (ehcip->ehci_qtd_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
	}

	if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
		/*
		 * If EHCI_QH_POOL_BOUND flag is set, then unbind
		 * the handle for QH pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_qh_pool_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
	}

	/* Free the QH pool */
	if (ehcip->ehci_qh_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
	}

	/* Free the Periodic frame list table (PFLT) area */
	if (ehcip->ehci_periodic_frame_list_tablep &&
	    ehcip->ehci_pflt_mem_handle) {
		/*
		 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
		 * the handle for PFLT.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_pflt_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
	}

	(void) ehci_isoc_cleanup(ehcip);

	if (ehcip->ehci_pflt_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
	}

	if (flags & EHCI_INTR) {
		/* Destroy the mutex */
		mutex_destroy(&ehcip->ehci_int_mutex);

		/* Destroy the async schedule advance condition variable */
		cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
	}

	/* clean up kstat structs */
	ehci_destroy_stats(ehcip);

	/* Free ehci hcdi ops */
	if (ehcip->ehci_hcdi_ops) {
		usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
	}

	if (flags & EHCI_ZALLOC) {

		usb_free_log_hdl(ehcip->ehci_log_hdl);

		/* Remove all properties that might have been created */
		ddi_prop_remove_all(ehcip->ehci_dip);

		/* Free the soft state */
		ddi_soft_state_free(ehci_statep,
		    ddi_get_instance(ehcip->ehci_dip));
	}

	return (DDI_SUCCESS);
}


/*
 * ehci_cpr_suspend:
 *
 * Suspend the host controller for checkpoint/resume (CPR): suspend the
 * root hub, drain the endpoint reclaim list, disable schedule processing
 * and interrupts, and stop the controller (unless a polled USB keyboard
 * is attached). Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ehci_cpr_suspend(ehci_state_t *ehcip)
{
	int	i;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend:");

	/* Call into the root hub and suspend it */
	if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_cpr_suspend: root hub fails to suspend");

		return (DDI_FAILURE);
	}

	/* Only root hub's intr pipe should be open at this time */
	mutex_enter(&ehcip->ehci_int_mutex);

	ASSERT(ehcip->ehci_open_pipe_count == 0);

	/*
	 * Just wait till all resources are reclaimed; bounded to three
	 * reclaim/SOF-wait passes so suspend cannot hang here.
	 */
	i = 0;
	while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
		ehci_handle_endpoint_reclaimation(ehcip);
		(void) ehci_wait_for_sof(ehcip);
	}
	ASSERT(ehcip->ehci_reclaim_list == NULL);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend: Disable HC QH list processing");

	/* Disable all EHCI QH list processing */
	Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
	    ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend: Disable HC interrupts");

	/* Disable all EHCI interrupts */
	Set_OpReg(ehci_interrupt, 0);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend: Wait for the next SOF");

	/* Wait for the next SOF */
	if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_cpr_suspend: ehci host controller suspend failed");

		mutex_exit(&ehcip->ehci_int_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * Stop the ehci host controller
	 * if usb keyboard is not connected.
	 */
	if (ehcip->ehci_polled_kbd_count == 0) {
		Set_OpReg(ehci_command,
		    Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
	}

	/* Set host controller soft state to suspend */
	ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;

	mutex_exit(&ehcip->ehci_int_mutex);

	return (DDI_SUCCESS);
}


/*
 * ehci_cpr_resume:
 *
 * Resume the host controller after CPR: reinitialize the controller via
 * ehci_init_ctlr() and then resume the root hub. Returns DDI_SUCCESS
 * or DDI_FAILURE.
 */
int
ehci_cpr_resume(ehci_state_t *ehcip)
{
	mutex_enter(&ehcip->ehci_int_mutex);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_resume: Restart the controller");

	/* Cleanup ehci specific information across cpr */
	ehci_cpr_cleanup(ehcip);

	/* Restart the controller */
	if (ehci_init_ctlr(ehcip) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_cpr_resume: ehci host controller resume failed ");

		mutex_exit(&ehcip->ehci_int_mutex);

		return (DDI_FAILURE);
	}

	mutex_exit(&ehcip->ehci_int_mutex);

	/* Now resume the root hub */
	if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


/*
 * Bandwidth Allocation functions
 */

/*
 * ehci_allocate_bandwidth:
 *
 * Figure out whether or not this interval may be supported. Return the index
 * into the lattice if it can be supported. Return allocation failure if it
 * can not be supported.
 */
int
ehci_allocate_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			*pnode,
	uchar_t			*smask,
	uchar_t			*cmask)
{
	int			error = USB_SUCCESS;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Reset the pnode to the last checked pnode */
	*pnode = 0;

	/*
	 * Allocate high speed bandwidth; on success *pnode, *smask and
	 * *cmask have been filled in by the helper.
	 */
	if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
	    ph, pnode, smask, cmask)) != USB_SUCCESS) {

		return (error);
	}

	/*
	 * For low/full speed usb devices, allocate classic TT bandwidth
	 * in additional to high speed bandwidth.
	 */
	if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {

		/* Allocate classic TT bandwidth */
		if ((error = ehci_allocate_classic_tt_bandwidth(
		    ehcip, ph, *pnode)) != USB_SUCCESS) {

			/*
			 * Roll back: release the high speed bandwidth
			 * acquired above so a partial failure leaves
			 * nothing allocated.
			 */
			ehci_deallocate_high_speed_bandwidth(
			    ehcip, ph, *pnode, *smask, *cmask);
		}
	}

	return (error);
}


/*
 * ehci_allocate_high_speed_bandwidth:
 *
 * Allocate high speed bandwidth for the low/full/high speed interrupt and
 * isochronous endpoints.
 */
static int
ehci_allocate_high_speed_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			*pnode,
	uchar_t			*smask,
	uchar_t			*cmask)
{
	uint_t			sbandwidth, cbandwidth;
	int			interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud;
	usb_port_status_t	port_status;
	int			error;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = ph->p_usba_device->usb_port_status;

	mutex_exit(&child_ud->usb_mutex);

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint. Return failure if maximum packet is
	 * zero.
	 */
	error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
	    port_status, &sbandwidth, &cbandwidth);
	if (error != USB_SUCCESS) {

		return (error);
	}

	/*
	 * Adjust polling interval to be a power of 2.
	 * If this interval can't be supported, return
	 * allocation failure.
	 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
	if (interval == USB_FAILURE) {

		return (USB_FAILURE);
	}

	/*
	 * Pick the mask-finding helper based on device speed and
	 * endpoint type; only low/full speed endpoints need a non-zero
	 * complete-split mask.
	 */
	if (port_status == USBA_HIGH_SPEED_DEV) {
		/* Allocate bandwidth for high speed devices, except ITD */
		error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
		    endpoint, sbandwidth, interval);
		*cmask = 0x00;

	} else {
		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
		    USB_EP_ATTR_INTR) {

			/* Allocate bandwidth for low speed interrupt */
			error = ehci_find_bestfit_ls_intr_mask(ehcip,
			    smask, cmask, pnode, sbandwidth, cbandwidth,
			    interval);
		} else {
			if ((endpoint->bEndpointAddress &
			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {

				/* Allocate bandwidth for sitd in */
				error = ehci_find_bestfit_sitd_in_mask(ehcip,
				    smask, cmask, pnode, sbandwidth, cbandwidth,
				    interval);
			} else {

				/* Allocate bandwidth for sitd out */
				error = ehci_find_bestfit_sitd_out_mask(ehcip,
				    smask, pnode, sbandwidth, interval);
				*cmask = 0x00;
			}
		}
	}

	if (error != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_high_speed_bandwidth: Reached maximum "
		    "bandwidth value and cannot allocate bandwidth for a "
		    "given high-speed periodic endpoint");

		return (USB_NO_BANDWIDTH);
	}

	return (error);
}


/*
 * ehci_allocate_classic_tt_bandwidth:
 *
 * Allocate classic TT bandwidth for the low/full speed interrupt and
 * isochronous endpoints.
 */
static int
ehci_allocate_classic_tt_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode)
{
	uint_t			bandwidth, min;
	uint_t			height, leftmost, list;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud, *parent_ud;
	usb_port_status_t	port_status;
	int			i, interval;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = child_ud->usb_port_status;

	/* Get the parent high speed hub's usba device structure */
	parent_ud = child_ud->usb_hs_hub_usba_dev;

	mutex_exit(&child_ud->usb_mutex);

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_allocate_classic_tt_bandwidth: "
	    "child_ud 0x%p parent_ud 0x%p", child_ud, parent_ud);

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint. Return failure if maximum packet is
	 * zero.
	 */
	if (ehci_compute_classic_bandwidth(endpoint,
	    port_status, &bandwidth) != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
		    "with zero endpoint maximum packet size is not supported");

		return (USB_NOT_SUPPORTED);
	}

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);

	mutex_enter(&parent_ud->usb_mutex);

	/*
	 * If the length in bytes plus the allocated bandwidth exceeds
	 * the maximum, return bandwidth allocation failure.
	 */
	if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
	    FS_PERIODIC_BANDWIDTH) {

		mutex_exit(&parent_ud->usb_mutex);

		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_classic_tt_bandwidth: Reached maximum "
		    "bandwidth value and cannot allocate bandwidth for a "
		    "given low/full speed periodic endpoint");

		return (USB_NO_BANDWIDTH);
	}

	mutex_exit(&parent_ud->usb_mutex);

	/*
	 * Adjust polling interval to be a power of 2. The interval was
	 * already validated by the preceding high speed allocation, so
	 * its return value is not re-checked here.
	 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/* Find the leftmost leaf in the subtree specified by the node. */
	leftmost = ehci_leftmost_leaf(pnode, height);

	mutex_enter(&parent_ud->usb_mutex);

	/*
	 * First pass: check every affected leaf list for headroom before
	 * committing anything, so failure leaves the hub's accounting
	 * untouched.
	 */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];

		if ((parent_ud->usb_hs_hub_bandwidth[list] +
		    bandwidth) > FS_PERIODIC_BANDWIDTH) {

			mutex_exit(&parent_ud->usb_mutex);

			USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
			    "ehci_allocate_classic_tt_bandwidth: Reached "
			    "maximum bandwidth value and cannot allocate "
			    "bandwidth for low/full periodic endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];
		parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min = parent_ud->usb_hs_hub_bandwidth[0];

	for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
		if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
			min = parent_ud->usb_hs_hub_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	parent_ud->usb_hs_hub_min_bandwidth = min;

	mutex_exit(&parent_ud->usb_mutex);

	return (USB_SUCCESS);
}


/*
 * ehci_deallocate_bandwidth:
 *
 * Deallocate bandwidth for the given node in the lattice and the length
 * of transfer. Mirror image of ehci_allocate_bandwidth().
 */
void
ehci_deallocate_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode,
	uchar_t			smask,
	uchar_t			cmask)
{
	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);

	/*
	 * For low/full speed usb devices, deallocate classic TT bandwidth
	 * in additional to high speed bandwidth.
	 */
	if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {

		/* Deallocate classic TT bandwidth */
		ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
	}
}


/*
 * ehci_deallocate_high_speed_bandwidth:
 *
 * Deallocate high speed bandwidth of a interrupt or isochronous endpoint.
 */
static void
ehci_deallocate_high_speed_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode,
	uchar_t			smask,
	uchar_t			cmask)
{
	uint_t			height, leftmost;
	uint_t			list_count;
	uint_t			sbandwidth, cbandwidth;
	int			interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud;
	usb_port_status_t	port_status;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = ph->p_usba_device->usb_port_status;

	mutex_exit(&child_ud->usb_mutex);

	/*
	 * Recompute the per-microframe bandwidth that was charged when the
	 * pipe was opened, so the same amount can be released below.
	 */
	(void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
	    port_status, &sbandwidth, &cbandwidth);

	/* Adjust polling interval to be a power of 2 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/*
	 * Find the leftmost leaf in the subtree specified by the node
	 */
	leftmost = ehci_leftmost_leaf(pnode, height);

	list_count = EHCI_NUM_INTR_QH_LISTS/interval;

	/* Delete the bandwidth from the appropriate lists */
	if (port_status == USBA_HIGH_SPEED_DEV) {

		ehci_update_bw_availability(ehcip, -sbandwidth,
		    leftmost, list_count, smask);
	} else {
		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
		    USB_EP_ATTR_INTR) {

			/* Split interrupt: release start and complete BW */
			ehci_update_bw_availability(ehcip, -sbandwidth,
			    leftmost, list_count, smask);
			ehci_update_bw_availability(ehcip, -cbandwidth,
			    leftmost, list_count, cmask);
		} else {
			if ((endpoint->bEndpointAddress &
			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {

				/*
				 * Isoch IN: the complete splits were
				 * reserved at the full per-uFrame sitd
				 * rate on the allocate path, so release
				 * the same fixed amount here.
				 */
				ehci_update_bw_availability(ehcip, -sbandwidth,
				    leftmost, list_count, smask);
				ehci_update_bw_availability(ehcip,
				    -MAX_UFRAME_SITD_XFER, leftmost,
				    list_count, cmask);
			} else {

				/* Isoch OUT: no complete splits to release */
				ehci_update_bw_availability(ehcip,
				    -MAX_UFRAME_SITD_XFER, leftmost,
				    list_count, smask);
			}
		}
	}
}

/*
 * ehci_deallocate_classic_tt_bandwidth:
 *
 * Deallocate the classic Transaction Translator (TT) bandwidth that a
 * low/full speed interrupt or isochronous endpoint consumed on its parent
 * high speed hub.
 */
static void
ehci_deallocate_classic_tt_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode)
{
	uint_t			bandwidth, height, leftmost, list, min;
	int			i, interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud, *parent_ud;
	usb_port_status_t	port_status;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = child_ud->usb_port_status;

	/* Get the parent high speed hub's usba device structure */
	parent_ud = child_ud->usb_hs_hub_usba_dev;

	mutex_exit(&child_ud->usb_mutex);

	/* Obtain the bandwidth */
	(void) ehci_compute_classic_bandwidth(endpoint,
	    port_status, &bandwidth);

	/* Adjust polling interval to be a power of 2 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/* Find the leftmost leaf in the subtree specified by the node */
	leftmost = ehci_leftmost_leaf(pnode, height);

	mutex_enter(&parent_ud->usb_mutex);

	/* Delete the bandwidth from the appropriate lists */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];

		parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min = parent_ud->usb_hs_hub_bandwidth[0];

	for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
		if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
			min = parent_ud->usb_hs_hub_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	parent_ud->usb_hs_hub_min_bandwidth = min;

	mutex_exit(&parent_ud->usb_mutex);
}


/*
 * ehci_compute_high_speed_bandwidth:
 *
 * Given a periodic endpoint (interrupt or isochronous) determine the total
 * bandwidth for one transaction. The EHCI host controller traverses the
 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
 * services an endpoint, only a single transaction attempt is made. The HC
 * moves to the next Endpoint Descriptor after the first transaction attempt
 * rather than finishing the entire Transfer Descriptor. Therefore, when a
 * Transfer Descriptor is inserted into the lattice, we will only count the
 * number of bytes for one transaction.
 *
 * The following are the formulas used for calculating bandwidth in terms
 * bytes and it is for the single USB high speed transaction. The protocol
 * overheads will be different for each of type of USB transfer & all these
 * formulas & protocol overheads are derived from the 5.11.3 section of the
 * USB 2.0 Specification.
2253 * 2254 * High-Speed: 2255 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay 2256 * 2257 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub) 2258 * 2259 * Protocol overhead + Split transaction overhead + 2260 * ((MaxPktSz * 7)/6) + Host_Delay; 2261 */ 2262 /* ARGSUSED */ 2263 static int 2264 ehci_compute_high_speed_bandwidth( 2265 ehci_state_t *ehcip, 2266 usb_ep_descr_t *endpoint, 2267 usb_port_status_t port_status, 2268 uint_t *sbandwidth, 2269 uint_t *cbandwidth) 2270 { 2271 ushort_t maxpacketsize = endpoint->wMaxPacketSize; 2272 2273 /* Return failure if endpoint maximum packet is zero */ 2274 if (maxpacketsize == 0) { 2275 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2276 "ehci_allocate_high_speed_bandwidth: Periodic endpoint " 2277 "with zero endpoint maximum packet size is not supported"); 2278 2279 return (USB_NOT_SUPPORTED); 2280 } 2281 2282 /* Add bit-stuffing overhead */ 2283 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6); 2284 2285 /* Add Host Controller specific delay to required bandwidth */ 2286 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY; 2287 2288 /* Add xfer specific protocol overheads */ 2289 if ((endpoint->bmAttributes & 2290 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) { 2291 /* High speed interrupt transaction */ 2292 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD; 2293 } else { 2294 /* Isochronous transaction */ 2295 *sbandwidth += HS_ISOC_PROTO_OVERHEAD; 2296 } 2297 2298 /* 2299 * For low/full speed devices, add split transaction specific 2300 * overheads. 2301 */ 2302 if (port_status != USBA_HIGH_SPEED_DEV) { 2303 /* 2304 * Add start and complete split transaction 2305 * tokens overheads. 
2306 */ 2307 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD; 2308 *sbandwidth += START_SPLIT_OVERHEAD; 2309 2310 /* Add data overhead depending on data direction */ 2311 if ((endpoint->bEndpointAddress & 2312 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2313 *cbandwidth += maxpacketsize; 2314 } else { 2315 if ((endpoint->bmAttributes & 2316 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) { 2317 /* There is no compete splits for out */ 2318 *cbandwidth = 0; 2319 } 2320 *sbandwidth += maxpacketsize; 2321 } 2322 } else { 2323 uint_t xactions; 2324 2325 /* Get the max transactions per microframe */ 2326 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >> 2327 USB_EP_MAX_XACTS_SHIFT) + 1; 2328 2329 /* High speed transaction */ 2330 *sbandwidth += maxpacketsize; 2331 2332 /* Calculate bandwidth per micro-frame */ 2333 *sbandwidth *= xactions; 2334 2335 *cbandwidth = 0; 2336 } 2337 2338 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2339 "ehci_allocate_high_speed_bandwidth: " 2340 "Start split bandwidth %d Complete split bandwidth %d", 2341 *sbandwidth, *cbandwidth); 2342 2343 return (USB_SUCCESS); 2344 } 2345 2346 2347 /* 2348 * ehci_compute_classic_bandwidth: 2349 * 2350 * Given a periodic endpoint (interrupt or isochronous) determine the total 2351 * bandwidth for one transaction. The EHCI host controller traverses the 2352 * endpoint descriptor lists on a first-come-first-serve basis. When the HC 2353 * services an endpoint, only a single transaction attempt is made. The HC 2354 * moves to the next Endpoint Descriptor after the first transaction attempt 2355 * rather than finishing the entire Transfer Descriptor. Therefore, when a 2356 * Transfer Descriptor is inserted into the lattice, we will only count the 2357 * number of bytes for one transaction. 2358 * 2359 * The following are the formulas used for calculating bandwidth in terms 2360 * bytes and it is for the single USB high speed transaction. 
The protocol 2361 * overheads will be different for each of type of USB transfer & all these 2362 * formulas & protocol overheads are derived from the 5.11.3 section of the 2363 * USB 2.0 Specification. 2364 * 2365 * Low-Speed: 2366 * Protocol overhead + Hub LS overhead + 2367 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay 2368 * 2369 * Full-Speed: 2370 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay 2371 */ 2372 /* ARGSUSED */ 2373 static int 2374 ehci_compute_classic_bandwidth( 2375 usb_ep_descr_t *endpoint, 2376 usb_port_status_t port_status, 2377 uint_t *bandwidth) 2378 { 2379 ushort_t maxpacketsize = endpoint->wMaxPacketSize; 2380 2381 /* 2382 * If endpoint maximum packet is zero, then return immediately. 2383 */ 2384 if (maxpacketsize == 0) { 2385 2386 return (USB_NOT_SUPPORTED); 2387 } 2388 2389 /* Add TT delay to required bandwidth */ 2390 *bandwidth = TT_DELAY; 2391 2392 /* Add bit-stuffing overhead */ 2393 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6); 2394 2395 switch (port_status) { 2396 case USBA_LOW_SPEED_DEV: 2397 /* Low speed interrupt transaction */ 2398 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD + 2399 HUB_LOW_SPEED_PROTO_OVERHEAD + 2400 (LOW_SPEED_CLOCK * maxpacketsize)); 2401 break; 2402 case USBA_FULL_SPEED_DEV: 2403 /* Full speed transaction */ 2404 *bandwidth += maxpacketsize; 2405 2406 /* Add xfer specific protocol overheads */ 2407 if ((endpoint->bmAttributes & 2408 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) { 2409 /* Full speed interrupt transaction */ 2410 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD; 2411 } else { 2412 /* Isochronous and input transaction */ 2413 if ((endpoint->bEndpointAddress & 2414 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2415 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD; 2416 } else { 2417 /* Isochronous and output transaction */ 2418 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD; 2419 } 2420 } 2421 break; 2422 } 2423 2424 return (USB_SUCCESS); 2425 } 2426 2427 2428 /* 2429 * ehci_adjust_polling_interval: 2430 * 2431 * 
Adjust bandwidth according usb device speed. 2432 */ 2433 /* ARGSUSED */ 2434 int 2435 ehci_adjust_polling_interval( 2436 ehci_state_t *ehcip, 2437 usb_ep_descr_t *endpoint, 2438 usb_port_status_t port_status) 2439 { 2440 uint_t interval; 2441 int i = 0; 2442 2443 /* Get the polling interval */ 2444 interval = endpoint->bInterval; 2445 2446 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2447 "ehci_adjust_polling_interval: Polling interval 0x%x", interval); 2448 2449 /* 2450 * According USB 2.0 Specifications, a high-speed endpoint's 2451 * polling intervals are specified interms of 125us or micro 2452 * frame, where as full/low endpoint's polling intervals are 2453 * specified in milliseconds. 2454 * 2455 * A high speed interrupt/isochronous endpoints can specify 2456 * desired polling interval between 1 to 16 micro-frames, 2457 * where as full/low endpoints can specify between 1 to 255 2458 * milliseconds. 2459 */ 2460 switch (port_status) { 2461 case USBA_LOW_SPEED_DEV: 2462 /* 2463 * Low speed endpoints are limited to specifying 2464 * only 8ms to 255ms in this driver. If a device 2465 * reports a polling interval that is less than 8ms, 2466 * it will use 8 ms instead. 2467 */ 2468 if (interval < LS_MIN_POLL_INTERVAL) { 2469 2470 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2471 "Low speed endpoint's poll interval of %d ms " 2472 "is below threshold. Rounding up to %d ms", 2473 interval, LS_MIN_POLL_INTERVAL); 2474 2475 interval = LS_MIN_POLL_INTERVAL; 2476 } 2477 2478 /* 2479 * Return an error if the polling interval is greater 2480 * than 255ms. 2481 */ 2482 if (interval > LS_MAX_POLL_INTERVAL) { 2483 2484 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2485 "Low speed endpoint's poll interval is " 2486 "greater than %d ms", LS_MAX_POLL_INTERVAL); 2487 2488 return (USB_FAILURE); 2489 } 2490 break; 2491 2492 case USBA_FULL_SPEED_DEV: 2493 /* 2494 * Return an error if the polling interval is less 2495 * than 1ms and greater than 255ms. 
2496 */ 2497 if ((interval < FS_MIN_POLL_INTERVAL) && 2498 (interval > FS_MAX_POLL_INTERVAL)) { 2499 2500 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2501 "Full speed endpoint's poll interval must " 2502 "be between %d and %d ms", FS_MIN_POLL_INTERVAL, 2503 FS_MAX_POLL_INTERVAL); 2504 2505 return (USB_FAILURE); 2506 } 2507 break; 2508 case USBA_HIGH_SPEED_DEV: 2509 /* 2510 * Return an error if the polling interval is less 1 2511 * and greater than 16. Convert this value to 125us 2512 * units using 2^(bInterval -1). refer usb 2.0 spec 2513 * page 51 for details. 2514 */ 2515 if ((interval < HS_MIN_POLL_INTERVAL) && 2516 (interval > HS_MAX_POLL_INTERVAL)) { 2517 2518 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2519 "High speed endpoint's poll interval " 2520 "must be between %d and %d units", 2521 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL); 2522 2523 return (USB_FAILURE); 2524 } 2525 2526 /* Adjust high speed device polling interval */ 2527 interval = 2528 ehci_adjust_high_speed_polling_interval(ehcip, endpoint); 2529 2530 break; 2531 } 2532 2533 /* 2534 * If polling interval is greater than 32ms, 2535 * adjust polling interval equal to 32ms. 2536 */ 2537 if (interval > EHCI_NUM_INTR_QH_LISTS) { 2538 interval = EHCI_NUM_INTR_QH_LISTS; 2539 } 2540 2541 /* 2542 * Find the nearest power of 2 that's less 2543 * than interval. 2544 */ 2545 while ((ehci_pow_2(i)) <= interval) { 2546 i++; 2547 } 2548 2549 return (ehci_pow_2((i - 1))); 2550 } 2551 2552 2553 /* 2554 * ehci_adjust_high_speed_polling_interval: 2555 */ 2556 /* ARGSUSED */ 2557 static int 2558 ehci_adjust_high_speed_polling_interval( 2559 ehci_state_t *ehcip, 2560 usb_ep_descr_t *endpoint) 2561 { 2562 uint_t interval; 2563 2564 /* Get the polling interval */ 2565 interval = ehci_pow_2(endpoint->bInterval - 1); 2566 2567 /* 2568 * Convert polling interval from micro seconds 2569 * to milli seconds. 
2570 */ 2571 if (interval <= EHCI_MAX_UFRAMES) { 2572 interval = 1; 2573 } else { 2574 interval = interval/EHCI_MAX_UFRAMES; 2575 } 2576 2577 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2578 "ehci_adjust_high_speed_polling_interval: " 2579 "High speed adjusted interval 0x%x", interval); 2580 2581 return (interval); 2582 } 2583 2584 2585 /* 2586 * ehci_lattice_height: 2587 * 2588 * Given the requested bandwidth, find the height in the tree at which the 2589 * nodes for this bandwidth fall. The height is measured as the number of 2590 * nodes from the leaf to the level specified by bandwidth The root of the 2591 * tree is at height TREE_HEIGHT. 2592 */ 2593 static uint_t 2594 ehci_lattice_height(uint_t interval) 2595 { 2596 return (TREE_HEIGHT - (ehci_log_2(interval))); 2597 } 2598 2599 2600 /* 2601 * ehci_lattice_parent: 2602 * 2603 * Given a node in the lattice, find the index of the parent node 2604 */ 2605 static uint_t 2606 ehci_lattice_parent(uint_t node) 2607 { 2608 if ((node % 2) == 0) { 2609 2610 return ((node/2) - 1); 2611 } else { 2612 2613 return ((node + 1)/2 - 1); 2614 } 2615 } 2616 2617 2618 /* 2619 * ehci_find_periodic_node: 2620 * 2621 * Based on the "real" array leaf node and interval, get the periodic node. 2622 */ 2623 static uint_t 2624 ehci_find_periodic_node(uint_t leaf, int interval) { 2625 uint_t lattice_leaf; 2626 uint_t height = ehci_lattice_height(interval); 2627 uint_t pnode; 2628 int i; 2629 2630 /* Get the leaf number in the lattice */ 2631 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1; 2632 2633 /* Get the node in the lattice based on the height and leaf */ 2634 pnode = lattice_leaf; 2635 for (i = 0; i < height; i++) { 2636 pnode = ehci_lattice_parent(pnode); 2637 } 2638 2639 return (pnode); 2640 } 2641 2642 2643 /* 2644 * ehci_leftmost_leaf: 2645 * 2646 * Find the leftmost leaf in the subtree specified by the node. Height refers 2647 * to number of nodes from the bottom of the tree to the node, including the 2648 * node. 
2649 * 2650 * The formula for a zero based tree is: 2651 * 2^H * Node + 2^H - 1 2652 * The leaf of the tree is an array, convert the number for the array. 2653 * Subtract the size of nodes not in the array 2654 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) = 2655 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS = 2656 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS 2657 * 0 2658 * 1 2 2659 * 0 1 2 3 2660 */ 2661 static uint_t 2662 ehci_leftmost_leaf( 2663 uint_t node, 2664 uint_t height) 2665 { 2666 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS); 2667 } 2668 2669 2670 /* 2671 * ehci_pow_2: 2672 * 2673 * Compute 2 to the power 2674 */ 2675 static uint_t 2676 ehci_pow_2(uint_t x) 2677 { 2678 if (x == 0) { 2679 2680 return (1); 2681 } else { 2682 2683 return (2 << (x - 1)); 2684 } 2685 } 2686 2687 2688 /* 2689 * ehci_log_2: 2690 * 2691 * Compute log base 2 of x 2692 */ 2693 static uint_t 2694 ehci_log_2(uint_t x) 2695 { 2696 int i = 0; 2697 2698 while (x != 1) { 2699 x = x >> 1; 2700 i++; 2701 } 2702 2703 return (i); 2704 } 2705 2706 2707 /* 2708 * ehci_find_bestfit_hs_mask: 2709 * 2710 * Find the smask and cmask in the bandwidth allocation, and update the 2711 * bandwidth allocation. 
2712 */ 2713 static int 2714 ehci_find_bestfit_hs_mask( 2715 ehci_state_t *ehcip, 2716 uchar_t *smask, 2717 uint_t *pnode, 2718 usb_ep_descr_t *endpoint, 2719 uint_t bandwidth, 2720 int interval) 2721 { 2722 int i; 2723 uint_t elements, index; 2724 int array_leaf, best_array_leaf; 2725 uint_t node_bandwidth, best_node_bandwidth; 2726 uint_t leaf_count; 2727 uchar_t bw_mask; 2728 uchar_t best_smask; 2729 2730 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2731 "ehci_find_bestfit_hs_mask: "); 2732 2733 /* Get all the valid smasks */ 2734 switch (ehci_pow_2(endpoint->bInterval - 1)) { 2735 case EHCI_INTR_1US_POLL: 2736 index = EHCI_1US_MASK_INDEX; 2737 elements = EHCI_INTR_1US_POLL; 2738 break; 2739 case EHCI_INTR_2US_POLL: 2740 index = EHCI_2US_MASK_INDEX; 2741 elements = EHCI_INTR_2US_POLL; 2742 break; 2743 case EHCI_INTR_4US_POLL: 2744 index = EHCI_4US_MASK_INDEX; 2745 elements = EHCI_INTR_4US_POLL; 2746 break; 2747 case EHCI_INTR_XUS_POLL: 2748 default: 2749 index = EHCI_XUS_MASK_INDEX; 2750 elements = EHCI_INTR_XUS_POLL; 2751 break; 2752 } 2753 2754 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 2755 2756 /* 2757 * Because of the way the leaves are setup, we will automatically 2758 * hit the leftmost leaf of every possible node with this interval. 2759 */ 2760 best_smask = 0x00; 2761 best_node_bandwidth = 0; 2762 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 2763 /* Find the bandwidth mask */ 2764 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip, 2765 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask); 2766 2767 /* 2768 * If this node cannot support our requirements skip to the 2769 * next leaf. 2770 */ 2771 if (bw_mask == 0x00) { 2772 continue; 2773 } 2774 2775 /* 2776 * Now make sure our bandwidth requirements can be 2777 * satisfied with one of smasks in this node. 
2778 */ 2779 *smask = 0x00; 2780 for (i = index; i < (index + elements); i++) { 2781 /* Check the start split mask value */ 2782 if (ehci_start_split_mask[index] & bw_mask) { 2783 *smask = ehci_start_split_mask[index]; 2784 break; 2785 } 2786 } 2787 2788 /* 2789 * If an appropriate smask is found save the information if: 2790 * o best_smask has not been found yet. 2791 * - or - 2792 * o This is the node with the least amount of bandwidth 2793 */ 2794 if ((*smask != 0x00) && 2795 ((best_smask == 0x00) || 2796 (best_node_bandwidth > node_bandwidth))) { 2797 2798 best_node_bandwidth = node_bandwidth; 2799 best_array_leaf = array_leaf; 2800 best_smask = *smask; 2801 } 2802 } 2803 2804 /* 2805 * If we find node that can handle the bandwidth populate the 2806 * appropriate variables and return success. 2807 */ 2808 if (best_smask) { 2809 *smask = best_smask; 2810 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 2811 interval); 2812 ehci_update_bw_availability(ehcip, bandwidth, 2813 ehci_index[best_array_leaf], leaf_count, best_smask); 2814 2815 return (USB_SUCCESS); 2816 } 2817 2818 return (USB_FAILURE); 2819 } 2820 2821 2822 /* 2823 * ehci_find_bestfit_ls_intr_mask: 2824 * 2825 * Find the smask and cmask in the bandwidth allocation. 
2826 */ 2827 static int 2828 ehci_find_bestfit_ls_intr_mask( 2829 ehci_state_t *ehcip, 2830 uchar_t *smask, 2831 uchar_t *cmask, 2832 uint_t *pnode, 2833 uint_t sbandwidth, 2834 uint_t cbandwidth, 2835 int interval) 2836 { 2837 int i; 2838 uint_t elements, index; 2839 int array_leaf, best_array_leaf; 2840 uint_t node_sbandwidth, node_cbandwidth; 2841 uint_t best_node_bandwidth; 2842 uint_t leaf_count; 2843 uchar_t bw_smask, bw_cmask; 2844 uchar_t best_smask, best_cmask; 2845 2846 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2847 "ehci_find_bestfit_ls_intr_mask: "); 2848 2849 /* For low and full speed devices */ 2850 index = EHCI_XUS_MASK_INDEX; 2851 elements = EHCI_INTR_4MS_POLL; 2852 2853 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 2854 2855 /* 2856 * Because of the way the leaves are setup, we will automatically 2857 * hit the leftmost leaf of every possible node with this interval. 2858 */ 2859 best_smask = 0x00; 2860 best_node_bandwidth = 0; 2861 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 2862 /* Find the bandwidth mask */ 2863 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip, 2864 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask); 2865 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip, 2866 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask); 2867 2868 /* 2869 * If this node cannot support our requirements skip to the 2870 * next leaf. 2871 */ 2872 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) { 2873 continue; 2874 } 2875 2876 /* 2877 * Now make sure our bandwidth requirements can be 2878 * satisfied with one of smasks in this node. 
2879 */ 2880 *smask = 0x00; 2881 *cmask = 0x00; 2882 for (i = index; i < (index + elements); i++) { 2883 /* Check the start split mask value */ 2884 if ((ehci_start_split_mask[index] & bw_smask) && 2885 (ehci_intr_complete_split_mask[index] & bw_cmask)) { 2886 *smask = ehci_start_split_mask[index]; 2887 *cmask = ehci_intr_complete_split_mask[index]; 2888 break; 2889 } 2890 } 2891 2892 /* 2893 * If an appropriate smask is found save the information if: 2894 * o best_smask has not been found yet. 2895 * - or - 2896 * o This is the node with the least amount of bandwidth 2897 */ 2898 if ((*smask != 0x00) && 2899 ((best_smask == 0x00) || 2900 (best_node_bandwidth > 2901 (node_sbandwidth + node_cbandwidth)))) { 2902 best_node_bandwidth = node_sbandwidth + node_cbandwidth; 2903 best_array_leaf = array_leaf; 2904 best_smask = *smask; 2905 best_cmask = *cmask; 2906 } 2907 } 2908 2909 /* 2910 * If we find node that can handle the bandwidth populate the 2911 * appropriate variables and return success. 2912 */ 2913 if (best_smask) { 2914 *smask = best_smask; 2915 *cmask = best_cmask; 2916 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 2917 interval); 2918 ehci_update_bw_availability(ehcip, sbandwidth, 2919 ehci_index[best_array_leaf], leaf_count, best_smask); 2920 ehci_update_bw_availability(ehcip, cbandwidth, 2921 ehci_index[best_array_leaf], leaf_count, best_cmask); 2922 2923 return (USB_SUCCESS); 2924 } 2925 2926 return (USB_FAILURE); 2927 } 2928 2929 2930 /* 2931 * ehci_find_bestfit_sitd_in_mask: 2932 * 2933 * Find the smask and cmask in the bandwidth allocation. 
 */
static int
ehci_find_bestfit_sitd_in_mask(
	ehci_state_t	*ehcip,
	uchar_t		*smask,
	uchar_t		*cmask,
	uint_t		*pnode,
	uint_t		sbandwidth,
	uint_t		cbandwidth,
	int		interval)
{
	int		i, uFrames, found;
	int		array_leaf, best_array_leaf;
	uint_t		node_sbandwidth, node_cbandwidth;
	uint_t		best_node_bandwidth;
	uint_t		leaf_count;
	uchar_t		bw_smask, bw_cmask;
	uchar_t		best_smask, best_cmask;

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_find_bestfit_sitd_in_mask: ");

	leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;

	/*
	 * Because of the way the leaves are setup, we will automatically
	 * hit the leftmost leaf of every possible node with this interval.
	 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
	 */
	/*
	 * Need to add an additional 2 uFrames, if the "L"ast
	 * complete split is before uFrame 6. See section
	 * 11.8.4 in USB 2.0 Spec. Currently we do not support
	 * the "Back Ptr" which means we support one IN of
	 * ~4*MAX_UFRAME_SITD_XFER bandwidth.
	 */
	uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
	if (cbandwidth % MAX_UFRAME_SITD_XFER) {
		uFrames++;
	}
	if (uFrames > 6) {

		return (USB_FAILURE);
	}
	/* Start split in uFrame 0; build a run of uFrames complete splits */
	*smask = 0x1;
	*cmask = 0x00;
	for (i = 0; i < uFrames; i++) {
		*cmask = *cmask << 1;
		*cmask |= 0x1;
	}
	/* cmask must start 2 frames after the smask */
	*cmask = *cmask << 2;

	found = 0;
	best_smask = 0x00;
	best_node_bandwidth = 0;
	/*
	 * NOTE(review): *smask, *cmask and "found" are not reset at the
	 * top of each array_leaf iteration, so masks shifted while probing
	 * one leaf carry over into the next leaf's probe -- confirm this
	 * is intentional before relying on best-fit behavior.
	 */
	for (array_leaf = 0; array_leaf < interval; array_leaf++) {
		node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
		    sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
		node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
		    MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
		    &bw_cmask);

		/*
		 * If this node cannot support our requirements skip to the
		 * next leaf.
		 */
		if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
			continue;
		}

		/* Slide the s/c masks until both land on free uFrames */
		for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
			if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
				found = 1;
				break;
			}
			*smask = *smask << 1;
			*cmask = *cmask << 1;
		}

		/*
		 * If an appropriate smask is found save the information if:
		 * o best_smask has not been found yet.
		 * - or -
		 * o This is the node with the least amount of bandwidth
		 */
		if (found &&
		    ((best_smask == 0x00) ||
		    (best_node_bandwidth >
		    (node_sbandwidth + node_cbandwidth)))) {
			best_node_bandwidth = node_sbandwidth + node_cbandwidth;
			best_array_leaf = array_leaf;
			best_smask = *smask;
			best_cmask = *cmask;
		}
	}

	/*
	 * If we find node that can handle the bandwidth populate the
	 * appropriate variables and return success.
	 */
	if (best_smask) {
		*smask = best_smask;
		*cmask = best_cmask;
		*pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
		    interval);
		ehci_update_bw_availability(ehcip, sbandwidth,
		    ehci_index[best_array_leaf], leaf_count, best_smask);
		ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
		    ehci_index[best_array_leaf], leaf_count, best_cmask);

		return (USB_SUCCESS);
	}

	return (USB_FAILURE);
}


/*
 * ehci_find_bestfit_sitd_out_mask:
 *
 * Find the smask in the bandwidth allocation.
 */
static int
ehci_find_bestfit_sitd_out_mask(
	ehci_state_t	*ehcip,
	uchar_t		*smask,
	uint_t		*pnode,
	uint_t		sbandwidth,
	int		interval)
{
	int		i, uFrames, found;
	int		array_leaf, best_array_leaf;
	uint_t		node_sbandwidth;
	uint_t		best_node_bandwidth;
	uint_t		leaf_count;
	uchar_t		bw_smask;
	uchar_t		best_smask;

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_find_bestfit_sitd_out_mask: ");

	leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;

	/*
	 * Because of the way the leaves are setup, we will automatically
	 * hit the leftmost leaf of every possible node with this interval.
	 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
	 */
	*smask = 0x00;
	uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
	if (sbandwidth % MAX_UFRAME_SITD_XFER) {
		uFrames++;
	}
	/* Build a run of uFrames start-split bits starting at uFrame 0 */
	for (i = 0; i < uFrames; i++) {
		*smask = *smask << 1;
		*smask |= 0x1;
	}

	found = 0;
	best_smask = 0x00;
	best_node_bandwidth = 0;
	/*
	 * NOTE(review): as in the sitd_in case above, *smask and "found"
	 * carry over from one array_leaf iteration to the next -- confirm
	 * this is intentional.
	 */
	for (array_leaf = 0; array_leaf < interval; array_leaf++) {
		node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
		    MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
		    &bw_smask);

		/*
		 * If this node cannot support our requirements skip to the
		 * next leaf.
		 */
		if (bw_smask == 0x00) {
			continue;
		}

		/* You cannot have a start split on the 8th uFrame */
		/* (counter "i" is unused; the 0x80 shift test terminates) */
		for (i = 0; (*smask & 0x80) == 0; i++) {
			if (*smask & bw_smask) {
				found = 1;
				break;
			}
			*smask = *smask << 1;
		}

		/*
		 * If an appropriate smask is found save the information if:
		 * o best_smask has not been found yet.
		 * - or -
		 * o This is the node with the least amount of bandwidth
		 */
		if (found &&
		    ((best_smask == 0x00) ||
		    (best_node_bandwidth > node_sbandwidth))) {
			best_node_bandwidth = node_sbandwidth;
			best_array_leaf = array_leaf;
			best_smask = *smask;
		}
	}

	/*
	 * If we find node that can handle the bandwidth populate the
	 * appropriate variables and return success.
	 */
	if (best_smask) {
		*smask = best_smask;
		*pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
		    interval);
		ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
		    ehci_index[best_array_leaf], leaf_count, best_smask);

		return (USB_SUCCESS);
	}

	return (USB_FAILURE);
}


/*
 * ehci_calculate_bw_availability_mask:
 *
 * Returns the "total bandwidth used" in this node.
 * Populates bw_mask with the uFrames that can support the bandwidth.
 *
 * If all the Frames cannot support this bandwidth, then bw_mask
 * will return 0x00 and the "total bandwidth used" will be invalid.
3159 */ 3160 static uint_t 3161 ehci_calculate_bw_availability_mask( 3162 ehci_state_t *ehcip, 3163 uint_t bandwidth, 3164 int leaf, 3165 int leaf_count, 3166 uchar_t *bw_mask) 3167 { 3168 int i, j; 3169 uchar_t bw_uframe; 3170 int uframe_total; 3171 ehci_frame_bandwidth_t *fbp; 3172 uint_t total_bandwidth = 0; 3173 3174 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3175 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d", 3176 leaf, leaf_count); 3177 3178 /* Start by saying all uFrames are available */ 3179 *bw_mask = 0xFF; 3180 3181 for (i = 0; (i < leaf_count) || (*bw_mask == 0x00); i++) { 3182 fbp = &ehcip->ehci_frame_bandwidth[leaf + i]; 3183 3184 total_bandwidth += fbp->ehci_allocated_frame_bandwidth; 3185 3186 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3187 /* 3188 * If the uFrame in bw_mask is available check to see if 3189 * it can support the additional bandwidth. 3190 */ 3191 bw_uframe = (*bw_mask & (0x1 << j)); 3192 uframe_total = 3193 fbp->ehci_micro_frame_bandwidth[j] + 3194 bandwidth; 3195 if ((bw_uframe) && 3196 (uframe_total > HS_PERIODIC_BANDWIDTH)) { 3197 *bw_mask = *bw_mask & ~bw_uframe; 3198 } 3199 } 3200 } 3201 3202 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3203 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x", 3204 *bw_mask); 3205 3206 return (total_bandwidth); 3207 } 3208 3209 3210 /* 3211 * ehci_update_bw_availability: 3212 * 3213 * The leftmost leaf needs to be in terms of array position and 3214 * not the actual lattice position. 
3215 */ 3216 static void 3217 ehci_update_bw_availability( 3218 ehci_state_t *ehcip, 3219 int bandwidth, 3220 int leftmost_leaf, 3221 int leaf_count, 3222 uchar_t mask) 3223 { 3224 int i, j; 3225 ehci_frame_bandwidth_t *fbp; 3226 int uFrame_bandwidth[8]; 3227 3228 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3229 "ehci_update_bw_availability: " 3230 "leaf %d count %d bandwidth 0x%x mask 0x%x", 3231 leftmost_leaf, leaf_count, bandwidth, mask); 3232 3233 ASSERT(leftmost_leaf < 32); 3234 ASSERT(leftmost_leaf >= 0); 3235 3236 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3237 if (mask & 0x1) { 3238 uFrame_bandwidth[j] = bandwidth; 3239 } else { 3240 uFrame_bandwidth[j] = 0; 3241 } 3242 3243 mask = mask >> 1; 3244 } 3245 3246 /* Updated all the effected leafs with the bandwidth */ 3247 for (i = 0; i < leaf_count; i++) { 3248 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i]; 3249 3250 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3251 fbp->ehci_micro_frame_bandwidth[j] += 3252 uFrame_bandwidth[j]; 3253 fbp->ehci_allocated_frame_bandwidth += 3254 uFrame_bandwidth[j]; 3255 } 3256 } 3257 } 3258 3259 /* 3260 * Miscellaneous functions 3261 */ 3262 3263 /* 3264 * ehci_obtain_state: 3265 * 3266 * NOTE: This function is also called from POLLED MODE. 3267 */ 3268 ehci_state_t * 3269 ehci_obtain_state(dev_info_t *dip) 3270 { 3271 int instance = ddi_get_instance(dip); 3272 3273 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance); 3274 3275 ASSERT(state != NULL); 3276 3277 return (state); 3278 } 3279 3280 3281 /* 3282 * ehci_state_is_operational: 3283 * 3284 * Check the Host controller state and return proper values. 
 */
int
ehci_state_is_operational(ehci_state_t	*ehcip)
{
	int	val;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	switch (ehcip->ehci_hc_soft_state) {
	case EHCI_CTLR_INIT_STATE:
	case EHCI_CTLR_SUSPEND_STATE:
		/* Not ready to accept requests yet */
		val = USB_FAILURE;
		break;
	case EHCI_CTLR_OPERATIONAL_STATE:
		val = USB_SUCCESS;
		break;
	case EHCI_CTLR_ERROR_STATE:
		val = USB_HC_HARDWARE_ERROR;
		break;
	default:
		val = USB_FAILURE;
		break;
	}

	return (val);
}


/*
 * ehci_do_soft_reset
 *
 * Do soft reset of ehci host controller.  Called with ehci_int_mutex
 * held; returns USB_SUCCESS when the controller is running and
 * advancing its frame counter again, USB_FAILURE otherwise.
 */
int
ehci_do_soft_reset(ehci_state_t	*ehcip)
{
	usb_frame_number_t	before_frame_number, after_frame_number;
	ehci_regs_t		*ehci_save_regs;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Increment host controller error count */
	ehcip->ehci_hc_error++;

	USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
	    "ehci_do_soft_reset:"
	    "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);

	/*
	 * Allocate space for saving current Host Controller
	 * registers. Don't do any recovery if allocation
	 * fails.  KM_NOSLEEP because this can run from interrupt
	 * context with the mutex held.
	 */
	ehci_save_regs = (ehci_regs_t *)
	    kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);

	if (ehci_save_regs == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
		    "ehci_do_soft_reset: kmem_zalloc failed");

		return (USB_FAILURE);
	}

	/*
	 * Save current ehci registers.
	 *
	 * NOTE(review): ehci_command and ehci_interrupt are saved here
	 * but the restore path below rebuilds both explicitly; only
	 * ctrl_segment, periodic_list_base, async_list_addr and
	 * config_flag are actually restored from this save area.
	 */
	ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
	ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
	ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
	ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
	ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
	ehci_save_regs->ehci_periodic_list_base =
	    Get_OpReg(ehci_periodic_list_base);

	USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
	    "ehci_do_soft_reset: Save reg = 0x%p", ehci_save_regs);

	/* Disable all list processing and interrupts */
	Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
	    ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));

	/* Disable all EHCI interrupts */
	Set_OpReg(ehci_interrupt, 0);

	/* Wait for few milliseconds */
	drv_usecwait(EHCI_SOF_TIMEWAIT);

	/* Do light soft reset of ehci host controller */
	Set_OpReg(ehci_command,
	    Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);

	USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
	    "ehci_do_soft_reset: Reset in progress");

	/* Wait for reset to complete */
	drv_usecwait(EHCI_RESET_TIMEWAIT);

	/*
	 * Restore previous saved EHCI register value
	 * into the current EHCI registers.
	 */
	Set_OpReg(ehci_ctrl_segment, (uint32_t)
	    ehci_save_regs->ehci_ctrl_segment);

	Set_OpReg(ehci_periodic_list_base, (uint32_t)
	    ehci_save_regs->ehci_periodic_list_base);

	Set_OpReg(ehci_async_list_addr, (uint32_t)
	    ehci_save_regs->ehci_async_list_addr);

	Set_OpReg(ehci_config_flag, (uint32_t)
	    ehci_save_regs->ehci_config_flag);

	/* Enable both Asynchronous and Periodic Schedule if necessary */
	ehci_toggle_scheduler(ehcip);

	/*
	 * Set ehci_interrupt to enable all interrupts except Root
	 * Hub Status change and frame list rollover interrupts.
	 */
	Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
	    EHCI_INTR_FRAME_LIST_ROLLOVER |
	    EHCI_INTR_USB_ERROR |
	    EHCI_INTR_USB);

	/*
	 * Deallocate the space that was allocated for saving
	 * HC registers.
	 */
	kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));

	/*
	 * Set the desired interrupt threshold, frame list size (if
	 * applicable) and turn the EHCI host controller on (RUN bit).
	 */
	Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
	    ~EHCI_CMD_INTR_THRESHOLD) |
	    (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));

	/* Wait 10ms for EHCI to start sending SOF */
	drv_usecwait(EHCI_RESET_TIMEWAIT);

	/*
	 * Get the current usb frame number before waiting for
	 * few milliseconds.
	 */
	before_frame_number = ehci_get_current_frame_number(ehcip);

	/* Wait for few milliseconds */
	drv_usecwait(EHCI_SOF_TIMEWAIT);

	/*
	 * Get the current usb frame number after waiting for
	 * few milliseconds.
	 */
	after_frame_number = ehci_get_current_frame_number(ehcip);

	USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
	    "ehci_do_soft_reset: Before Frame Number 0x%llx "
	    "After Frame Number 0x%llx",
	    before_frame_number, after_frame_number);

	/*
	 * Recovery failed only when the frame counter is stuck AND the
	 * controller reports itself halted.
	 */
	if ((after_frame_number <= before_frame_number) &&
	    (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {

		USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
		    "ehci_do_soft_reset: Soft reset failed");

		return (USB_FAILURE);
	}

	return (USB_SUCCESS);
}


/*
 * ehci_get_xfer_attrs:
 *
 * Get the attributes of a particular xfer.
 *
 * NOTE: This function is also called from POLLED MODE.
 */
usb_req_attrs_t
ehci_get_xfer_attrs(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
	usb_req_attrs_t		attrs = USB_ATTRS_NONE;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_get_xfer_attrs:");

	/*
	 * The current request pointer is interpreted according to the
	 * endpoint's transfer type; isochronous pipes fall through and
	 * return USB_ATTRS_NONE.
	 */
	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
	case USB_EP_ATTR_CONTROL:
		attrs = ((usb_ctrl_req_t *)
		    tw->tw_curr_xfer_reqp)->ctrl_attributes;
		break;
	case USB_EP_ATTR_BULK:
		attrs = ((usb_bulk_req_t *)
		    tw->tw_curr_xfer_reqp)->bulk_attributes;
		break;
	case USB_EP_ATTR_INTR:
		attrs = ((usb_intr_req_t *)
		    tw->tw_curr_xfer_reqp)->intr_attributes;
		break;
	}

	return (attrs);
}


/*
 * ehci_get_current_frame_number:
 *
 * Get the current software based usb frame number.
3500 */ 3501 usb_frame_number_t 3502 ehci_get_current_frame_number(ehci_state_t *ehcip) 3503 { 3504 usb_frame_number_t usb_frame_number; 3505 usb_frame_number_t ehci_fno, micro_frame_number; 3506 3507 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3508 3509 ehci_fno = ehcip->ehci_fno; 3510 micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF; 3511 3512 /* 3513 * Calculate current software based usb frame number. 3514 * 3515 * This code accounts for the fact that frame number is 3516 * updated by the Host Controller before the ehci driver 3517 * gets an FrameListRollover interrupt that will adjust 3518 * Frame higher part. 3519 * 3520 * Refer ehci specification 1.0, section 2.3.2, page 21. 3521 */ 3522 micro_frame_number = ((micro_frame_number & 0x1FFF) | 3523 ehci_fno) + (((micro_frame_number & 0x3FFF) ^ 3524 ehci_fno) & 0x2000); 3525 3526 /* 3527 * Micro Frame number is equivalent to 125 usec. Eight 3528 * Micro Frame numbers are equivalent to one millsecond 3529 * or one usb frame number. 3530 */ 3531 usb_frame_number = micro_frame_number >> 3532 EHCI_uFRAMES_PER_USB_FRAME_SHIFT; 3533 3534 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3535 "ehci_get_current_frame_number: " 3536 "Current usb uframe number = 0x%llx " 3537 "Current usb frame number = 0x%llx", 3538 micro_frame_number, usb_frame_number); 3539 3540 return (usb_frame_number); 3541 } 3542 3543 3544 /* 3545 * ehci_cpr_cleanup: 3546 * 3547 * Cleanup ehci state and other ehci specific informations across 3548 * Check Point Resume (CPR). 
3549 */ 3550 static void 3551 ehci_cpr_cleanup(ehci_state_t *ehcip) 3552 { 3553 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3554 3555 /* Reset software part of usb frame number */ 3556 ehcip->ehci_fno = 0; 3557 } 3558 3559 3560 /* 3561 * ehci_wait_for_sof: 3562 * 3563 * Wait for couple of SOF interrupts 3564 */ 3565 int 3566 ehci_wait_for_sof(ehci_state_t *ehcip) 3567 { 3568 usb_frame_number_t before_frame_number, after_frame_number; 3569 int error = USB_SUCCESS; 3570 3571 USB_DPRINTF_L4(PRINT_MASK_LISTS, 3572 ehcip->ehci_log_hdl, "ehci_wait_for_sof"); 3573 3574 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3575 3576 error = ehci_state_is_operational(ehcip); 3577 3578 if (error != USB_SUCCESS) { 3579 3580 return (error); 3581 } 3582 3583 /* Get the current usb frame number before waiting for two SOFs */ 3584 before_frame_number = ehci_get_current_frame_number(ehcip); 3585 3586 mutex_exit(&ehcip->ehci_int_mutex); 3587 3588 /* Wait for few milliseconds */ 3589 delay(drv_usectohz(EHCI_SOF_TIMEWAIT)); 3590 3591 mutex_enter(&ehcip->ehci_int_mutex); 3592 3593 /* Get the current usb frame number after woken up */ 3594 after_frame_number = ehci_get_current_frame_number(ehcip); 3595 3596 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3597 "ehci_wait_for_sof: framenumber: before 0x%llx " 3598 "after 0x%llx", before_frame_number, after_frame_number); 3599 3600 /* Return failure, if usb frame number has not been changed */ 3601 if (after_frame_number <= before_frame_number) { 3602 3603 if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) { 3604 3605 USB_DPRINTF_L0(PRINT_MASK_LISTS, 3606 ehcip->ehci_log_hdl, "No SOF interrupts"); 3607 3608 /* Set host controller soft state to error */ 3609 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE; 3610 3611 return (USB_FAILURE); 3612 } 3613 3614 /* Get new usb frame number */ 3615 after_frame_number = before_frame_number = 3616 ehci_get_current_frame_number(ehcip); 3617 } 3618 3619 ASSERT(after_frame_number > before_frame_number); 
3620 3621 return (USB_SUCCESS); 3622 } 3623 3624 3625 /* 3626 * ehci_toggle_scheduler: 3627 * 3628 * Turn scheduler based on pipe open count. 3629 */ 3630 void 3631 ehci_toggle_scheduler(ehci_state_t *ehcip) { 3632 uint_t temp_reg, cmd_reg; 3633 3634 cmd_reg = Get_OpReg(ehci_command); 3635 temp_reg = cmd_reg; 3636 3637 /* 3638 * Enable/Disable asynchronous scheduler, and 3639 * turn on/off async list door bell 3640 */ 3641 if (ehcip->ehci_open_async_count) { 3642 if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) { 3643 /* 3644 * For some reason this address might get nulled out by 3645 * the ehci chip. Set it here just in case it is null. 3646 */ 3647 Set_OpReg(ehci_async_list_addr, 3648 ehci_qh_cpu_to_iommu(ehcip, 3649 ehcip->ehci_head_of_async_sched_list)); 3650 } 3651 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE; 3652 } else { 3653 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE; 3654 } 3655 3656 if (ehcip->ehci_open_periodic_count) { 3657 if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) { 3658 /* 3659 * For some reason this address get's nulled out by 3660 * the ehci chip. Set it here just in case it is null. 
3661 */ 3662 Set_OpReg(ehci_periodic_list_base, 3663 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address & 3664 0xFFFFF000)); 3665 } 3666 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE; 3667 } else { 3668 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE; 3669 } 3670 3671 /* Just an optimization */ 3672 if (temp_reg != cmd_reg) { 3673 Set_OpReg(ehci_command, cmd_reg); 3674 } 3675 } 3676 3677 /* 3678 * ehci print functions 3679 */ 3680 3681 /* 3682 * ehci_print_caps: 3683 */ 3684 void 3685 ehci_print_caps(ehci_state_t *ehcip) 3686 { 3687 uint_t i; 3688 3689 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3690 "\n\tUSB 2.0 Host Controller Characteristics\n"); 3691 3692 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3693 "Caps Length: 0x%x Version: 0x%x\n", 3694 Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version)); 3695 3696 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3697 "Structural Parameters\n"); 3698 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3699 "Port indicators: %s", (Get_Cap(ehci_hcs_params) & 3700 EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No"); 3701 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3702 "No of Classic host controllers: 0x%x", 3703 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS) 3704 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT); 3705 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3706 "No of ports per Classic host controller: 0x%x", 3707 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC) 3708 >> EHCI_HCS_NUM_PORTS_CC_SHIFT); 3709 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3710 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) & 3711 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No"); 3712 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3713 "Port power control: %s", (Get_Cap(ehci_hcs_params) & 3714 EHCI_HCS_PORT_POWER_CONTROL) ? 
"Yes" : "No"); 3715 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3716 "No of root hub ports: 0x%x\n", 3717 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); 3718 3719 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3720 "Capability Parameters\n"); 3721 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3722 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) & 3723 EHCI_HCC_EECP) ? "Yes" : "No"); 3724 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3725 "Isoch schedule threshold: 0x%x", 3726 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD); 3727 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3728 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) & 3729 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No"); 3730 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3731 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) & 3732 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024"); 3733 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3734 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) & 3735 EHCI_HCC_64BIT_ADDR_CAP) ? 
"Yes" : "No"); 3736 3737 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3738 "Classic Port Route Description"); 3739 3740 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) { 3741 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3742 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i])); 3743 } 3744 } 3745 3746 3747 /* 3748 * ehci_print_regs: 3749 */ 3750 void 3751 ehci_print_regs(ehci_state_t *ehcip) 3752 { 3753 uint_t i; 3754 3755 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3756 "\n\tEHCI%d Operational Registers\n", 3757 ddi_get_instance(ehcip->ehci_dip)); 3758 3759 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3760 "Command: 0x%x Status: 0x%x", 3761 Get_OpReg(ehci_command), Get_OpReg(ehci_status)); 3762 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3763 "Interrupt: 0x%x Frame Index: 0x%x", 3764 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index)); 3765 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3766 "Control Segment: 0x%x Periodic List Base: 0x%x", 3767 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base)); 3768 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3769 "Async List Addr: 0x%x Config Flag: 0x%x", 3770 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag)); 3771 3772 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3773 "Root Hub Port Status"); 3774 3775 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) { 3776 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3777 "\tPort Status 0x%x: 0x%x ", i, 3778 Get_OpReg(ehci_rh_port_status[i])); 3779 } 3780 } 3781 3782 3783 /* 3784 * ehci_print_qh: 3785 */ 3786 void 3787 ehci_print_qh( 3788 ehci_state_t *ehcip, 3789 ehci_qh_t *qh) 3790 { 3791 uint_t i; 3792 3793 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3794 "ehci_print_qh: qh = 0x%p", (void *)qh); 3795 3796 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3797 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr)); 3798 
USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3799 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl)); 3800 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3801 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl)); 3802 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3803 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd)); 3804 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3805 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd)); 3806 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3807 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd)); 3808 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3809 "\tqh_status: 0x%x ", Get_QH(qh->qh_status)); 3810 3811 for (i = 0; i < 5; i++) { 3812 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3813 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i])); 3814 } 3815 3816 for (i = 0; i < 5; i++) { 3817 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3818 "\tqh_buf_high[%d]: 0x%x ", 3819 i, Get_QH(qh->qh_buf_high[i])); 3820 } 3821 3822 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3823 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd)); 3824 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3825 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev)); 3826 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3827 "\tqh_state: 0x%x ", Get_QH(qh->qh_state)); 3828 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3829 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next)); 3830 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3831 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame)); 3832 } 3833 3834 3835 /* 3836 * ehci_print_qtd: 3837 */ 3838 void 3839 ehci_print_qtd( 3840 ehci_state_t *ehcip, 3841 ehci_qtd_t *qtd) 3842 { 3843 uint_t i; 3844 3845 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3846 "ehci_print_qtd: qtd = 0x%p", (void *)qtd); 3847 3848 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3849 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd)); 3850 
USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3851 "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd)); 3852 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3853 "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl)); 3854 3855 for (i = 0; i < 5; i++) { 3856 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3857 "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i])); 3858 } 3859 3860 for (i = 0; i < 5; i++) { 3861 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3862 "\tqtd_buf_high[%d]: 0x%x ", 3863 i, Get_QTD(qtd->qtd_buf_high[i])); 3864 } 3865 3866 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3867 "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper)); 3868 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3869 "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd)); 3870 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3871 "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next)); 3872 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3873 "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev)); 3874 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3875 "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state)); 3876 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3877 "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase)); 3878 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3879 "\tqtd_xfer_addr: 0x%x ", Get_QTD(qtd->qtd_xfer_addr)); 3880 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3881 "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len)); 3882 } 3883 3884 /* 3885 * ehci kstat functions 3886 */ 3887 3888 /* 3889 * ehci_create_stats: 3890 * 3891 * Allocate and initialize the ehci kstat structures 3892 */ 3893 void 3894 ehci_create_stats(ehci_state_t *ehcip) 3895 { 3896 char kstatname[KSTAT_STRLEN]; 3897 const char *dname = ddi_driver_name(ehcip->ehci_dip); 3898 char *usbtypes[USB_N_COUNT_KSTATS] = 3899 {"ctrl", "isoch", "bulk", "intr"}; 3900 uint_t instance = 
ehcip->ehci_instance; 3901 ehci_intrs_stats_t *isp; 3902 int i; 3903 3904 if (EHCI_INTRS_STATS(ehcip) == NULL) { 3905 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs", 3906 dname, instance); 3907 EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance, 3908 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED, 3909 sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t), 3910 KSTAT_FLAG_PERSISTENT); 3911 3912 if (EHCI_INTRS_STATS(ehcip)) { 3913 isp = EHCI_INTRS_STATS_DATA(ehcip); 3914 kstat_named_init(&isp->ehci_sts_total, 3915 "Interrupts Total", KSTAT_DATA_UINT64); 3916 kstat_named_init(&isp->ehci_sts_not_claimed, 3917 "Not Claimed", KSTAT_DATA_UINT64); 3918 kstat_named_init(&isp->ehci_sts_async_sched_status, 3919 "Async schedule status", KSTAT_DATA_UINT64); 3920 kstat_named_init(&isp->ehci_sts_periodic_sched_status, 3921 "Periodic sched status", KSTAT_DATA_UINT64); 3922 kstat_named_init(&isp->ehci_sts_empty_async_schedule, 3923 "Empty async schedule", KSTAT_DATA_UINT64); 3924 kstat_named_init(&isp->ehci_sts_host_ctrl_halted, 3925 "Host controller Halted", KSTAT_DATA_UINT64); 3926 kstat_named_init(&isp->ehci_sts_async_advance_intr, 3927 "Intr on async advance", KSTAT_DATA_UINT64); 3928 kstat_named_init(&isp->ehci_sts_host_system_error_intr, 3929 "Host system error", KSTAT_DATA_UINT64); 3930 kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr, 3931 "Frame list rollover", KSTAT_DATA_UINT64); 3932 kstat_named_init(&isp->ehci_sts_rh_port_change_intr, 3933 "Port change detect", KSTAT_DATA_UINT64); 3934 kstat_named_init(&isp->ehci_sts_usb_error_intr, 3935 "USB error interrupt", KSTAT_DATA_UINT64); 3936 kstat_named_init(&isp->ehci_sts_usb_intr, 3937 "USB interrupt", KSTAT_DATA_UINT64); 3938 3939 EHCI_INTRS_STATS(ehcip)->ks_private = ehcip; 3940 EHCI_INTRS_STATS(ehcip)->ks_update = nulldev; 3941 kstat_install(EHCI_INTRS_STATS(ehcip)); 3942 } 3943 } 3944 3945 if (EHCI_TOTAL_STATS(ehcip) == NULL) { 3946 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total", 3947 dname, 
instance); 3948 EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance, 3949 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1, 3950 KSTAT_FLAG_PERSISTENT); 3951 3952 if (EHCI_TOTAL_STATS(ehcip)) { 3953 kstat_install(EHCI_TOTAL_STATS(ehcip)); 3954 } 3955 } 3956 3957 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 3958 if (ehcip->ehci_count_stats[i] == NULL) { 3959 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s", 3960 dname, instance, usbtypes[i]); 3961 ehcip->ehci_count_stats[i] = kstat_create("usba", 3962 instance, kstatname, "usb_byte_count", 3963 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 3964 3965 if (ehcip->ehci_count_stats[i]) { 3966 kstat_install(ehcip->ehci_count_stats[i]); 3967 } 3968 } 3969 } 3970 } 3971 3972 3973 /* 3974 * ehci_destroy_stats: 3975 * 3976 * Clean up ehci kstat structures 3977 */ 3978 void 3979 ehci_destroy_stats(ehci_state_t *ehcip) 3980 { 3981 int i; 3982 3983 if (EHCI_INTRS_STATS(ehcip)) { 3984 kstat_delete(EHCI_INTRS_STATS(ehcip)); 3985 EHCI_INTRS_STATS(ehcip) = NULL; 3986 } 3987 3988 if (EHCI_TOTAL_STATS(ehcip)) { 3989 kstat_delete(EHCI_TOTAL_STATS(ehcip)); 3990 EHCI_TOTAL_STATS(ehcip) = NULL; 3991 } 3992 3993 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 3994 if (ehcip->ehci_count_stats[i]) { 3995 kstat_delete(ehcip->ehci_count_stats[i]); 3996 ehcip->ehci_count_stats[i] = NULL; 3997 } 3998 } 3999 } 4000 4001 4002 /* 4003 * ehci_do_intrs_stats: 4004 * 4005 * ehci status information 4006 */ 4007 void 4008 ehci_do_intrs_stats( 4009 ehci_state_t *ehcip, 4010 int val) 4011 { 4012 if (EHCI_INTRS_STATS(ehcip)) { 4013 EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++; 4014 switch (val) { 4015 case EHCI_STS_ASYNC_SCHED_STATUS: 4016 EHCI_INTRS_STATS_DATA(ehcip)-> 4017 ehci_sts_async_sched_status.value.ui64++; 4018 break; 4019 case EHCI_STS_PERIODIC_SCHED_STATUS: 4020 EHCI_INTRS_STATS_DATA(ehcip)-> 4021 ehci_sts_periodic_sched_status.value.ui64++; 4022 break; 4023 case EHCI_STS_EMPTY_ASYNC_SCHEDULE: 4024 EHCI_INTRS_STATS_DATA(ehcip)-> 4025 
ehci_sts_empty_async_schedule.value.ui64++; 4026 break; 4027 case EHCI_STS_HOST_CTRL_HALTED: 4028 EHCI_INTRS_STATS_DATA(ehcip)-> 4029 ehci_sts_host_ctrl_halted.value.ui64++; 4030 break; 4031 case EHCI_STS_ASYNC_ADVANCE_INTR: 4032 EHCI_INTRS_STATS_DATA(ehcip)-> 4033 ehci_sts_async_advance_intr.value.ui64++; 4034 break; 4035 case EHCI_STS_HOST_SYSTEM_ERROR_INTR: 4036 EHCI_INTRS_STATS_DATA(ehcip)-> 4037 ehci_sts_host_system_error_intr.value.ui64++; 4038 break; 4039 case EHCI_STS_FRM_LIST_ROLLOVER_INTR: 4040 EHCI_INTRS_STATS_DATA(ehcip)-> 4041 ehci_sts_frm_list_rollover_intr.value.ui64++; 4042 break; 4043 case EHCI_STS_RH_PORT_CHANGE_INTR: 4044 EHCI_INTRS_STATS_DATA(ehcip)-> 4045 ehci_sts_rh_port_change_intr.value.ui64++; 4046 break; 4047 case EHCI_STS_USB_ERROR_INTR: 4048 EHCI_INTRS_STATS_DATA(ehcip)-> 4049 ehci_sts_usb_error_intr.value.ui64++; 4050 break; 4051 case EHCI_STS_USB_INTR: 4052 EHCI_INTRS_STATS_DATA(ehcip)-> 4053 ehci_sts_usb_intr.value.ui64++; 4054 break; 4055 default: 4056 EHCI_INTRS_STATS_DATA(ehcip)-> 4057 ehci_sts_not_claimed.value.ui64++; 4058 break; 4059 } 4060 } 4061 } 4062 4063 4064 /* 4065 * ehci_do_byte_stats: 4066 * 4067 * ehci data xfer information 4068 */ 4069 void 4070 ehci_do_byte_stats( 4071 ehci_state_t *ehcip, 4072 size_t len, 4073 uint8_t attr, 4074 uint8_t addr) 4075 { 4076 uint8_t type = attr & USB_EP_ATTR_MASK; 4077 uint8_t dir = addr & USB_EP_DIR_MASK; 4078 4079 if (dir == USB_EP_DIR_IN) { 4080 EHCI_TOTAL_STATS_DATA(ehcip)->reads++; 4081 EHCI_TOTAL_STATS_DATA(ehcip)->nread += len; 4082 switch (type) { 4083 case USB_EP_ATTR_CONTROL: 4084 EHCI_CTRL_STATS(ehcip)->reads++; 4085 EHCI_CTRL_STATS(ehcip)->nread += len; 4086 break; 4087 case USB_EP_ATTR_BULK: 4088 EHCI_BULK_STATS(ehcip)->reads++; 4089 EHCI_BULK_STATS(ehcip)->nread += len; 4090 break; 4091 case USB_EP_ATTR_INTR: 4092 EHCI_INTR_STATS(ehcip)->reads++; 4093 EHCI_INTR_STATS(ehcip)->nread += len; 4094 break; 4095 case USB_EP_ATTR_ISOCH: 4096 EHCI_ISOC_STATS(ehcip)->reads++; 4097 
EHCI_ISOC_STATS(ehcip)->nread += len; 4098 break; 4099 } 4100 } else if (dir == USB_EP_DIR_OUT) { 4101 EHCI_TOTAL_STATS_DATA(ehcip)->writes++; 4102 EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len; 4103 switch (type) { 4104 case USB_EP_ATTR_CONTROL: 4105 EHCI_CTRL_STATS(ehcip)->writes++; 4106 EHCI_CTRL_STATS(ehcip)->nwritten += len; 4107 break; 4108 case USB_EP_ATTR_BULK: 4109 EHCI_BULK_STATS(ehcip)->writes++; 4110 EHCI_BULK_STATS(ehcip)->nwritten += len; 4111 break; 4112 case USB_EP_ATTR_INTR: 4113 EHCI_INTR_STATS(ehcip)->writes++; 4114 EHCI_INTR_STATS(ehcip)->nwritten += len; 4115 break; 4116 case USB_EP_ATTR_ISOCH: 4117 EHCI_ISOC_STATS(ehcip)->writes++; 4118 EHCI_ISOC_STATS(ehcip)->nwritten += len; 4119 break; 4120 } 4121 } 4122 } 4123