/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 *
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/scsi/adapters/pmcs/pmcs.h>

#define	PMCS_DRIVER_VERSION	"pmcs HBA device driver"

/* Reported via the SM-HBA PMCS_DRV_VERSION property in pmcs_attach(). */
static char *pmcs_driver_rev = PMCS_DRIVER_VERSION;

/*
 * Non-DDI Compliant stuff
 */
extern char hw_serial[];

/*
 * Global driver data
 *
 * Two soft-state anchors: one for HBA nodes (pmcs_hw_t) and one for
 * iport child nodes (pmcs_iport_t).  Both are initialized in _init().
 */
void *pmcs_softc_state = NULL;
void *pmcs_iport_softstate = NULL;

/*
 * Tracing and Logging info
 *
 * A single trace buffer is shared by all HBA instances; it is allocated
 * lazily by the first pmcs_attach() and freed in _fini().  All of these
 * are protected by pmcs_trace_lock.
 */
pmcs_tbuf_t *pmcs_tbuf = NULL;
uint32_t pmcs_tbuf_num_elems = 0;
pmcs_tbuf_t *pmcs_tbuf_ptr;
uint32_t pmcs_tbuf_idx = 0;
boolean_t pmcs_tbuf_wrap = B_FALSE;
static kmutex_t pmcs_trace_lock;

/*
 * If pmcs_force_syslog value is non-zero, all messages put in the trace log
 * will also be sent to system log.
 */
int pmcs_force_syslog = 0;
int pmcs_console = 0;

/*
 * External References
 */
extern int ncpus_online;

/*
 * Local static data
 *
 * Defaults for the driver.conf tunables read in pmcs_attach(); each can
 * be overridden via a "pmcs-*" property.
 */
static int fwlog_level = 3;
static int physpeed = PHY_LINK_ALL;
static int phymode = PHY_LM_AUTO;
static int block_mask = 0;
static int phymap_usec = 3 * MICROSEC;		/* phymap settle time */
static int iportmap_usec = 2 * MICROSEC;	/* iportmap settle time */

#ifdef DEBUG
static int debug_mask = 1;
#else
static int debug_mask = 0;
#endif

#ifdef DISABLE_MSIX
static int disable_msix = 1;
#else
static int disable_msix = 0;
#endif

#ifdef DISABLE_MSI
static int disable_msi = 1;
#else
static int disable_msi = 0;
#endif

static uint16_t maxqdepth = 0xfffe;

/*
 * Local prototypes
 */
static int pmcs_attach(dev_info_t *, ddi_attach_cmd_t);
static int pmcs_detach(dev_info_t *, ddi_detach_cmd_t);
static int pmcs_unattach(pmcs_hw_t *);
static int pmcs_iport_unattach(pmcs_iport_t *);
static int pmcs_add_more_chunks(pmcs_hw_t *, unsigned long);
static void pmcs_watchdog(void *);
static int pmcs_setup_intr(pmcs_hw_t *);
static int pmcs_teardown_intr(pmcs_hw_t *);

static uint_t pmcs_nonio_ix(caddr_t, caddr_t);
static uint_t pmcs_general_ix(caddr_t, caddr_t);
static uint_t pmcs_event_ix(caddr_t, caddr_t);
static uint_t pmcs_iodone_ix(caddr_t, caddr_t);
static uint_t pmcs_fatal_ix(caddr_t, caddr_t);
static uint_t pmcs_all_intr(caddr_t, caddr_t);
static int pmcs_quiesce(dev_info_t *dip);
static boolean_t pmcs_fabricate_wwid(pmcs_hw_t *);

static void pmcs_create_phy_stats(pmcs_iport_t *);
int pmcs_update_phy_stats(kstat_t *, int);
static void pmcs_destroy_phy_stats(pmcs_iport_t *);

static void pmcs_fm_fini(pmcs_hw_t *pwp);
static void pmcs_fm_init(pmcs_hw_t *pwp);
static int pmcs_fm_error_cb(dev_info_t *dip,
    ddi_fm_error_t *err, const void *impl_data);

/*
 * Local configuration
 data
 */
static struct dev_ops pmcs_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	pmcs_attach,		/* attach */
	pmcs_detach,		/* detach */
	nodev,			/* reset */
	NULL,			/* driver operations */
	NULL,			/* bus operations */
	ddi_power,		/* power management */
	pmcs_quiesce		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,
	PMCS_DRIVER_VERSION,
	&pmcs_ops,	/* driver ops */
};
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/* DMA attributes shared by all of the driver's DMA allocations. */
const ddi_dma_attr_t pmcs_dattr = {
	DMA_ATTR_V0,			/* dma_attr version */
	0x0000000000000000ull,		/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi */
	0x00000000FFFFFFFFull,		/* dma_attr_count_max */
	0x0000000000000001ull,		/* dma_attr_align */
	0x00000078,			/* dma_attr_burstsizes */
	0x00000001,			/* dma_attr_minxfer */
	0x00000000FFFFFFFFull,		/* dma_attr_maxxfer */
	0x00000000FFFFFFFFull,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	512,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

/* Default (little-endian, strictly-ordered) register access attributes. */
static ddi_device_acc_attr_t rattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};


/*
 * Attach/Detach functions
 */

/*
 * _init(9E): set up the HBA and iport soft-state anchors, register with
 * SCSA and the module framework.  On any failure, everything initialized
 * so far is torn down in reverse order before returning the error.
 */
int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&pmcs_softc_state, sizeof (pmcs_hw_t), 1);
	if (ret != 0) {
		cmn_err(CE_WARN, "?soft state init failed for pmcs");
		return (ret);
	}

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		cmn_err(CE_WARN, "?scsi_hba_init failed for pmcs");
		ddi_soft_state_fini(&pmcs_softc_state);
		return (ret);
	}

	/*
	 * Allocate soft state for iports
	 */
	ret = ddi_soft_state_init(&pmcs_iport_softstate,
	    sizeof (pmcs_iport_t), 2);
	if (ret != 0) {
		cmn_err(CE_WARN, "?iport soft state init failed for pmcs");
		ddi_soft_state_fini(&pmcs_softc_state);
		return (ret);
	}

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		cmn_err(CE_WARN, "?mod_install failed for pmcs (%d)", ret);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&pmcs_iport_softstate);
		ddi_soft_state_fini(&pmcs_softc_state);
		return (ret);
	}

	/*
	 * Initialize the global trace lock.
	 * NOTE(review): this happens after mod_install(); presumably no
	 * attach (which takes this lock) can run until modload completes --
	 * verify that ordering assumption.
	 */
	mutex_init(&pmcs_trace_lock, NULL, MUTEX_DRIVER, NULL);

	return (0);
}

/*
 * _fini(9E): unload the module.  Fails (and leaves the module loaded) if
 * mod_remove() reports outstanding holds; otherwise releases the trace
 * buffer, the global trace lock and both soft-state anchors.
 */
int
_fini(void)
{
	int ret;
	if ((ret = mod_remove(&modlinkage)) != 0) {
		return (ret);
	}
	scsi_hba_fini(&modlinkage);

	/* Free pmcs log buffer and destroy the global lock */
	if (pmcs_tbuf) {
		kmem_free(pmcs_tbuf,
		    pmcs_tbuf_num_elems * sizeof (pmcs_tbuf_t));
		pmcs_tbuf = NULL;
	}
	mutex_destroy(&pmcs_trace_lock);

	ddi_soft_state_fini(&pmcs_iport_softstate);
	ddi_soft_state_fini(&pmcs_softc_state);
	return (0);
}

/*
 * _info(9E): report module information via the modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Attach an iport child node.  Called from pmcs_attach() when the dip has
 * a unit address.  Allocates and initializes the pmcs_iport_t, configures
 * its PHY list if the unit address is active in the parent's phymap, sets
 * the SM-HBA iport properties, and links the iport onto the HBA's list.
 * All failure paths unwind via the iport_attach_fail* labels.
 */
static int
pmcs_iport_attach(dev_info_t *dip)
{
	pmcs_iport_t	*iport;
	pmcs_hw_t	*pwp;
	scsi_hba_tran_t	*tran;
	void		*ua_priv = NULL;
	char		*iport_ua;
	char		*init_port;
	int		hba_inst;
	int		inst;

	hba_inst = ddi_get_instance(ddi_get_parent(dip));
	inst = ddi_get_instance(dip);

	pwp = ddi_get_soft_state(pmcs_softc_state, hba_inst);
	if (pwp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: iport%d attach invoked with NULL parent (HBA) node)",
		    __func__, inst);
		return (DDI_FAILURE);
	}

	/* Refuse new iports while the HBA is being torn down or is dead */
	if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) {
		return (DDI_FAILURE);
	}

	if ((iport_ua = scsi_hba_iport_unit_address(dip)) == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: invoked with NULL unit address, inst (%d)",
		    __func__, inst);
		return (DDI_FAILURE);
	}

	if
	    (ddi_soft_state_zalloc(pmcs_iport_softstate, inst) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to alloc soft state for iport %d", inst);
		return (DDI_FAILURE);
	}

	iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
	if (iport == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "cannot get iport soft state");
		goto iport_attach_fail1;
	}

	/* All iport locks interoperate with the HBA's interrupt priority */
	mutex_init(&iport->lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&iport->refcnt_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&iport->smp_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&iport->refcnt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	mutex_init(&iport->smp_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));

	/* Set some data on the iport handle */
	iport->dip = dip;
	iport->pwp = pwp;

	/* Dup the UA into the iport handle (freed via strfree on teardown) */
	iport->ua = strdup(iport_ua);

	tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
	tran->tran_hba_private = iport;

	list_create(&iport->phys, sizeof (pmcs_phy_t),
	    offsetof(pmcs_phy_t, list_node));

	/*
	 * If our unit address is active in the phymap, configure our
	 * iport's phylist.
	 */
	mutex_enter(&iport->lock);
	ua_priv = sas_phymap_lookup_uapriv(pwp->hss_phymap, iport->ua);
	if (ua_priv) {
		/* Non-NULL private data indicates the unit address is active */
		iport->ua_state = UA_ACTIVE;
		if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "%s: failed to "
			    "configure phys on iport handle (0x%p), "
			    " unit address [%s]", __func__,
			    (void *)iport, iport_ua);
			mutex_exit(&iport->lock);
			goto iport_attach_fail2;
		}
	} else {
		iport->ua_state = UA_INACTIVE;
	}
	mutex_exit(&iport->lock);

	/* Allocate string-based soft state pool for targets */
	iport->tgt_sstate = NULL;
	if (ddi_soft_state_bystr_init(&iport->tgt_sstate,
	    sizeof (pmcs_xscsi_t), PMCS_TGT_SSTATE_SZ) != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "cannot get iport tgt soft state");
		goto iport_attach_fail2;
	}

	/* Create this iport's target map */
	if (pmcs_iport_tgtmap_create(iport) == B_FALSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to create tgtmap on iport %d", inst);
		goto iport_attach_fail3;
	}

	/* Set up the 'initiator-port' DDI property on this iport */
	init_port = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
	if (pwp->separate_ports) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: separate ports not supported", __func__);
	} else {
		/* Set initiator-port value to the HBA's base WWN */
		(void) scsi_wwn_to_wwnstr(pwp->sas_wwns[0], 1,
		    init_port);
	}

	mutex_enter(&iport->lock);
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_STRING,
	    SCSI_ADDR_PROP_INITIATOR_PORT, init_port);
	kmem_free(init_port, PMCS_MAX_UA_SIZE);

	/* Set up a 'num-phys' DDI property for the iport node */
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
	    &iport->nphy);
	mutex_exit(&iport->lock);

	/* Create kstats for each of the phys in this port */
	pmcs_create_phy_stats(iport);

	/*
	 * Insert this iport handle into our list and set
	 * iports_attached on the HBA node.
	 */
	rw_enter(&pwp->iports_lock, RW_WRITER);
	ASSERT(!list_link_active(&iport->list_node));
	list_insert_tail(&pwp->iports, iport);
	pwp->iports_attached = 1;
	pwp->num_iports++;
	rw_exit(&pwp->iports_lock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
	    "iport%d attached", inst);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

	/* teardown and fail: each label undoes everything after the prior */
iport_attach_fail3:
	ddi_soft_state_bystr_fini(&iport->tgt_sstate);
iport_attach_fail2:
	list_destroy(&iport->phys);
	strfree(iport->ua);
	mutex_destroy(&iport->refcnt_lock);
	mutex_destroy(&iport->smp_lock);
	cv_destroy(&iport->refcnt_cv);
	cv_destroy(&iport->smp_cv);
	mutex_destroy(&iport->lock);
iport_attach_fail1:
	ddi_soft_state_free(pmcs_iport_softstate, inst);
	return (DDI_FAILURE);
}

/*
 * attach(9E) entry point.  Dispatches iport-node attaches to
 * pmcs_iport_attach(); for the HBA node performs the full hardware
 * bring-up: soft state, properties, register mapping, DMA areas,
 * interrupts, MPI start, firmware checks, worker threads, SCSA
 * registration and phymap/iportmap creation.
 */
static int
pmcs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	scsi_hba_tran_t *tran;
	char chiprev, *fwsupport, hw_rev[24], fw_rev[24];
	off_t set3size;
	int inst, i;
	int sm_hba = 1;
	int protocol = 0;
	int num_phys = 0;
	pmcs_hw_t *pwp;
	pmcs_phy_t *phyp;
	uint32_t num_threads;
	char buf[64];

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_PM_RESUME:
	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		/* No DDI_?_RESUME on iport nodes */
		if (scsi_hba_iport_unit_address(dip) != NULL) {
			return (DDI_SUCCESS);
		}
		pwp = TRAN2PMC(tran);
		if (pwp == NULL) {
			return (DDI_FAILURE);
		}

		mutex_enter(&pwp->lock);
		pwp->suspended = 0;
		if (pwp->tq) {
			ddi_taskq_resume(pwp->tq);
		}
		mutex_exit(&pwp->lock);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	/*
	 * If this is an iport
	 * node, invoke iport attach.
	 */
	if (scsi_hba_iport_unit_address(dip) != NULL) {
		return (pmcs_iport_attach(dip));
	}

	/*
	 * From here on is attach for the HBA node
	 */

#ifdef DEBUG
	/*
	 * Check to see if this unit is to be disabled.  We can't disable
	 * on a per-iport node.  It's either the entire HBA or nothing.
	 */
	(void) snprintf(buf, sizeof (buf),
	    "disable-instance-%d", ddi_get_instance(dip));
	if (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, buf, 0)) {
		cmn_err(CE_NOTE, "pmcs%d: disabled by configuration",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}
#endif

	/*
	 * Allocate softstate
	 */
	inst = ddi_get_instance(dip);
	if (ddi_soft_state_zalloc(pmcs_softc_state, inst) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "pmcs%d: Failed to alloc soft state", inst);
		return (DDI_FAILURE);
	}

	pwp = ddi_get_soft_state(pmcs_softc_state, inst);
	if (pwp == NULL) {
		cmn_err(CE_WARN, "pmcs%d: cannot get soft state", inst);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}
	pwp->dip = dip;
	STAILQ_INIT(&pwp->dq);
	STAILQ_INIT(&pwp->cq);
	STAILQ_INIT(&pwp->wf);
	STAILQ_INIT(&pwp->pf);
	/*
	 * Create the list for iports
	 */
	list_create(&pwp->iports, sizeof (pmcs_iport_t),
	    offsetof(pmcs_iport_t, list_node));

	pwp->state = STATE_PROBING;

	/*
	 * Get driver.conf properties
	 */
	pwp->debug_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-debug-mask",
	    debug_mask);
	pwp->phyid_block_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phyid-block-mask",
	    block_mask);
	pwp->physpeed = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-physpeed", physpeed);
	pwp->phymode = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phymode", phymode);
	pwp->fwlog = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fwlog", fwlog_level);
	if (pwp->fwlog > PMCS_FWLOG_MAX) {
		pwp->fwlog = PMCS_FWLOG_MAX;
	}

	/*
	 * The trace buffer is global; only the first instance through here
	 * allocates it (sized by the pmcs-tbuf-num-elems property).
	 */
	mutex_enter(&pmcs_trace_lock);
	if (pmcs_tbuf == NULL) {
		/* Allocate trace buffer */
		pmcs_tbuf_num_elems = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-tbuf-num-elems",
		    PMCS_TBUF_NUM_ELEMS_DEF);
		if ((pmcs_tbuf_num_elems == DDI_PROP_NOT_FOUND) ||
		    (pmcs_tbuf_num_elems == 0)) {
			pmcs_tbuf_num_elems = PMCS_TBUF_NUM_ELEMS_DEF;
		}

		pmcs_tbuf = kmem_zalloc(pmcs_tbuf_num_elems *
		    sizeof (pmcs_tbuf_t), KM_SLEEP);
		pmcs_tbuf_ptr = pmcs_tbuf;
		pmcs_tbuf_idx = 0;
	}
	mutex_exit(&pmcs_trace_lock);

	disable_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msix",
	    disable_msix);
	disable_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msi",
	    disable_msi);
	maxqdepth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-maxqdepth", maxqdepth);
	pwp->fw_force_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fw-force-update", 0);
	if (pwp->fw_force_update == 0) {
		pwp->fw_disable_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "pmcs-fw-disable-update", 0);
	}
	pwp->ioq_depth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-num-io-qentries",
	    PMCS_NQENTRY);

	/*
	 * Initialize FMA
	 */
	pwp->dev_acc_attr = pwp->reg_acc_attr = rattr;
	pwp->iqp_dma_attr = pwp->oqp_dma_attr =
	    pwp->regdump_dma_attr = pwp->cip_dma_attr =
	    pwp->fwlog_dma_attr = pmcs_dattr;
	pwp->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, pwp->dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "fm-capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	pmcs_fm_init(pwp);

	/*
	 * Map registers
	 */
	if (pci_config_setup(dip, &pwp->pci_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
		    "pci config setup failed");
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	/*
	 * Get the size of register set 3.
	 */
	if (ddi_dev_regsize(dip, PMCS_REGSET_3, &set3size) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get size of register set %d", PMCS_REGSET_3);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	/*
	 * Map registers
	 */
	pwp->reg_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;

	if (ddi_regs_map_setup(dip, PMCS_REGSET_0, (caddr_t *)&pwp->msg_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->msg_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map Message Unit registers");
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, PMCS_REGSET_1, (caddr_t *)&pwp->top_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->top_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map TOP registers");
		ddi_regs_map_free(&pwp->msg_acc_handle);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, PMCS_REGSET_2, (caddr_t *)&pwp->gsm_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->gsm_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map GSM registers");
		ddi_regs_map_free(&pwp->top_acc_handle);
		ddi_regs_map_free(&pwp->msg_acc_handle);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, PMCS_REGSET_3, (caddr_t *)&pwp->mpi_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->mpi_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map MPI registers");
		ddi_regs_map_free(&pwp->top_acc_handle);
		ddi_regs_map_free(&pwp->gsm_acc_handle);
		ddi_regs_map_free(&pwp->msg_acc_handle);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}
	pwp->mpibar =
	    (((5U << 2) + 0x10) << PMCS_MSGU_MPI_BAR_SHIFT) | set3size;

	/*
	 * Make sure we can support this card.
	 */
	pwp->chiprev = pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION);

	switch (pwp->chiprev) {
	case PMCS_PM8001_REV_A:
	case PMCS_PM8001_REV_B:
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "Rev A/B Card no longer supported");
		goto failure;
	case PMCS_PM8001_REV_C:
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "Unknown chip revision (%d)", pwp->chiprev);
		goto failure;
	}

	/*
	 * Allocate DMA addressable area for Inbound and Outbound Queue indices
	 * that the chip needs to access plus a space for scratch usage
	 */
	pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t);
	if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pwp->cip_acchdls,
	    &pwp->cip_handles, ptob(1), (caddr_t *)&pwp->cip,
	    &pwp->ciaddr) == B_FALSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to setup DMA for index/scratch");
		goto failure;
	}

	bzero(pwp->cip, ptob(1));
	pwp->scratch = &pwp->cip[PMCS_INDICES_SIZE];
	pwp->scratch_dma = pwp->ciaddr + PMCS_INDICES_SIZE;

	/*
	 * Allocate DMA S/G list chunks
	 */
	(void) pmcs_add_more_chunks(pwp, ptob(1) * PMCS_MIN_CHUNK_PAGES);

	/*
	 * Allocate a DMA addressable area for the firmware log (if needed).
	 * Failure here is non-fatal: firmware logging is simply disabled.
	 */
	if (pwp->fwlog) {
		/*
		 * Align to event log header and entry size
		 */
		pwp->fwlog_dma_attr.dma_attr_align = 32;
		if (pmcs_dma_setup(pwp, &pwp->fwlog_dma_attr,
		    &pwp->fwlog_acchdl,
		    &pwp->fwlog_hndl, PMCS_FWLOG_SIZE,
		    (caddr_t *)&pwp->fwlogp,
		    &pwp->fwaddr) == B_FALSE) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Failed to setup DMA for fwlog area");
			pwp->fwlog = 0;
		} else {
			bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
		}
	}

	if (pwp->flash_chunk_addr == NULL) {
		pwp->regdump_dma_attr.dma_attr_align = PMCS_FLASH_CHUNK_SIZE;
		if (pmcs_dma_setup(pwp, &pwp->regdump_dma_attr,
		    &pwp->regdump_acchdl,
		    &pwp->regdump_hndl, PMCS_FLASH_CHUNK_SIZE,
		    (caddr_t *)&pwp->flash_chunkp, &pwp->flash_chunk_addr) ==
		    B_FALSE) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Failed to setup DMA for register dump area");
			goto failure;
		}
		bzero(pwp->flash_chunkp, PMCS_FLASH_CHUNK_SIZE);
	}

	/*
	 * More bits of local initialization...
	 */
	pwp->tq = ddi_taskq_create(dip, "_tq", 4, TASKQ_DEFAULTPRI, 0);
	if (pwp->tq == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to create worker taskq");
		goto failure;
	}

	/*
	 * Cache of structures for dealing with I/O completion callbacks.
	 */
	(void) snprintf(buf, sizeof (buf), "pmcs_iocomp_cb_cache%d", inst);
	pwp->iocomp_cb_cache = kmem_cache_create(buf,
	    sizeof (pmcs_iocomp_cb_t), 16, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * Cache of PHY structures
	 */
	(void) snprintf(buf, sizeof (buf), "pmcs_phy_cache%d", inst);
	pwp->phy_cache = kmem_cache_create(buf, sizeof (pmcs_phy_t), 8,
	    pmcs_phy_constructor, pmcs_phy_destructor, NULL, (void *)pwp,
	    NULL, 0);

	/*
	 * Allocate space for the I/O completion threads
	 */
	num_threads = ncpus_online;
	if (num_threads > PMCS_MAX_CQ_THREADS) {
		num_threads = PMCS_MAX_CQ_THREADS;
	}

	pwp->cq_info.cq_thr_info = kmem_zalloc(sizeof (pmcs_cq_thr_info_t) *
	    num_threads, KM_SLEEP);
	pwp->cq_info.cq_threads = num_threads;
	pwp->cq_info.cq_next_disp_thr = 0;
	pwp->cq_info.cq_stop = B_FALSE;

	/*
	 * Set the quantum value in clock ticks for the I/O interrupt
	 * coalescing timer.
	 */
	pwp->io_intr_coal.quantum = drv_usectohz(PMCS_QUANTUM_TIME_USECS);

	/*
	 * We have a delicate dance here. We need to set up
	 * interrupts so we know how to set up some OQC
	 * tables. However, while we're setting up table
	 * access, we may need to flash new firmware and
	 * reset the card, which will take some finessing.
	 */

	/*
	 * Set up interrupts here.
	 */
	switch (pmcs_setup_intr(pwp)) {
	case 0:
		break;
	case EIO:
		pwp->stuck = 1;
		/* FALLTHROUGH */
	default:
		goto failure;
	}

	/*
	 * Set these up now because they are used to initialize the OQC tables.
	 *
	 * If we have MSI or MSI-X interrupts set up and we have enough
	 * vectors for each OQ, the Outbound Queue vectors can all be the
	 * same as the appropriate interrupt routine will have been called
	 * and the doorbell register automatically cleared.
	 * This keeps us from having to check the Outbound Doorbell register
	 * when the routines for these interrupts are called.
	 *
	 * If we have Legacy INT-X interrupts set up or we didn't have enough
	 * MSI/MSI-X vectors to uniquely identify each OQ, we point these
	 * vectors to the bits we would like to have set in the Outbound
	 * Doorbell register because pmcs_all_intr will read the doorbell
	 * register to find out why we have an interrupt and write the
	 * corresponding 'clear' bit for that interrupt.
	 *
	 * NOTE(review): vector counts other than 1, 2 or 4 leave oqvec
	 * untouched -- presumably pmcs_setup_intr() only ever yields these
	 * counts; verify.
	 */

	switch (pwp->intr_cnt) {
	case 1:
		/*
		 * Only one vector, so we must check all OQs for MSI.  For
		 * INT-X, there's only one vector anyway, so we can just
		 * use the outbound queue bits to keep from having to
		 * check each queue for each interrupt.
		 */
		if (pwp->int_type == PMCS_INT_FIXED) {
			pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
			pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
			pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
		} else {
			pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
			pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_IODONE;
			pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_IODONE;
		}
		break;
	case 2:
		/* With 2, we can at least isolate IODONE */
		pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
		pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
		pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_GENERAL;
		break;
	case 4:
		/* With 4 vectors, everybody gets one */
		pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
		pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
		pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
		break;
	}

	/*
	 * Do the first part of setup
	 */
	if (pmcs_setup(pwp)) {
		goto failure;
	}
	pmcs_report_fwversion(pwp);

	/*
	 * Now do some additional allocations based upon information
	 * gathered during MPI setup.
	 */
	pwp->root_phys = kmem_zalloc(pwp->nphy * sizeof (pmcs_phy_t), KM_SLEEP);
	ASSERT(pwp->nphy < SAS2_PHYNUM_MAX);
	phyp = pwp->root_phys;
	for (i = 0; i < pwp->nphy; i++) {
		if (i < pwp->nphy-1) {
			phyp->sibling = (phyp + 1);
		}
		mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(pwp->intr_pri));
		phyp->phynum = i & SAS2_PHYNUM_MASK;
		pmcs_phy_name(pwp, phyp, phyp->path, sizeof (phyp->path));
		phyp->pwp = pwp;
		phyp->device_id = PMCS_INVALID_DEVICE_ID;
		phyp->portid = PMCS_PHY_INVALID_PORT_ID;
		phyp++;
	}

	/*
	 * NOTE(review): the loop below initializes and queues only
	 * max_cmd - 1 work structures although max_cmd were allocated;
	 * confirm the last slot is intentionally held back.
	 */
	pwp->work = kmem_zalloc(pwp->max_cmd * sizeof (pmcwork_t), KM_SLEEP);
	for (i = 0; i < pwp->max_cmd - 1; i++) {
		pmcwork_t *pwrk = &pwp->work[i];
		mutex_init(&pwrk->lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(pwp->intr_pri));
		cv_init(&pwrk->sleep_cv, NULL, CV_DRIVER, NULL);
		STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next);

	}
	pwp->targets = (pmcs_xscsi_t **)
	    kmem_zalloc(pwp->max_dev * sizeof (pmcs_xscsi_t *), KM_SLEEP);

	pwp->iqpt = (pmcs_iqp_trace_t *)
	    kmem_zalloc(sizeof (pmcs_iqp_trace_t), KM_SLEEP);
	pwp->iqpt->head = kmem_zalloc(PMCS_IQP_TRACE_BUFFER_SIZE, KM_SLEEP);
	pwp->iqpt->curpos = pwp->iqpt->head;
	pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE;

	/*
	 * Start MPI communication.
	 */
	if (pmcs_start_mpi(pwp)) {
		if (pmcs_soft_reset(pwp, B_FALSE)) {
			goto failure;
		}
	}

	/*
	 * Do some initial acceptance tests.
	 * This tests interrupts and queues.
	 */
	if (pmcs_echo_test(pwp)) {
		goto failure;
	}

	/* Read VPD - if it exists */
	if (pmcs_get_nvmd(pwp, PMCS_NVMD_VPD, PMCIN_NVMD_VPD, 0, NULL, 0)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Unable to read VPD: "
		    "attempting to fabricate", __func__);
		/*
		 * When we release, this must goto failure and the call
		 * to pmcs_fabricate_wwid is removed.
		 */
		/* goto failure; */
		if (!pmcs_fabricate_wwid(pwp)) {
			goto failure;
		}
	}

	/*
	 * We're now officially running
	 */
	pwp->state = STATE_RUNNING;

	/*
	 * Check firmware versions and load new firmware
	 * if needed and reset.
	 */
	if (pmcs_firmware_update(pwp)) {
		pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
		    "%s: Firmware update failed", __func__);
		goto failure;
	}

	/*
	 * Create completion threads.
	 */
	for (i = 0; i < pwp->cq_info.cq_threads; i++) {
		pwp->cq_info.cq_thr_info[i].cq_pwp = pwp;
		pwp->cq_info.cq_thr_info[i].cq_thread =
		    thread_create(NULL, 0, pmcs_scsa_cq_run,
		    &pwp->cq_info.cq_thr_info[i], 0, &p0, TS_RUN, minclsyspri);
	}

	/*
	 * Create one thread to deal with the updating of the interrupt
	 * coalescing timer.
	 */
	pwp->ict_thread = thread_create(NULL, 0, pmcs_check_intr_coal,
	    pwp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * Kick off the watchdog
	 */
	pwp->wdhandle = timeout(pmcs_watchdog, pwp,
	    drv_usectohz(PMCS_WATCH_INTERVAL));
	/*
	 * Do the SCSI attachment code (before starting phys)
	 */
	if (pmcs_scsa_init(pwp, &pmcs_dattr)) {
		goto failure;
	}
	pwp->hba_attached = 1;

	/*
	 * Initialize the rwlock for the iport elements.
	 */
	rw_init(&pwp->iports_lock, NULL, RW_DRIVER, NULL);

	/* Check all acc & dma handles allocated in attach */
	if (pmcs_check_acc_dma_handle(pwp)) {
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
		goto failure;
	}

	/*
	 * Create the phymap for this HBA instance
	 */
	if (sas_phymap_create(dip, phymap_usec, PHYMAP_MODE_SIMPLE, NULL,
	    pwp, pmcs_phymap_activate, pmcs_phymap_deactivate,
	    &pwp->hss_phymap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: pmcs%d phymap_create failed", __func__, inst);
		goto failure;
	}
	ASSERT(pwp->hss_phymap);

	/*
	 * Create the iportmap for this HBA instance
	 */
	if (scsi_hba_iportmap_create(dip, iportmap_usec,
	    &pwp->hss_iportmap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: pmcs%d iportmap_create failed", __func__, inst);
		goto failure;
	}
	ASSERT(pwp->hss_iportmap);

	/*
	 * Start the PHYs.
	 */
	if (pmcs_start_phys(pwp)) {
		goto failure;
	}

	/*
	 * From this point on, we can't fail.
	 */
	ddi_report_dev(dip);

	/* SM-HBA */
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SMHBA_SUPPORTED,
	    &sm_hba);

	/* SM-HBA */
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION,
	    pmcs_driver_rev);

	/*
	 * SM-HBA
	 * NOTE(review): &chiprev points at a single char with no NUL
	 * terminator; the size bound of 2 limits the output to one
	 * character, but confirm no overread of the source is possible
	 * with this snprintf usage.
	 */
	chiprev = 'A' + pwp->chiprev;
	(void) snprintf(hw_rev, 2, "%s", &chiprev);
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_HWARE_VERSION,
	    hw_rev);

	/* SM-HBA */
	switch (PMCS_FW_TYPE(pwp)) {
	case PMCS_FW_TYPE_RELEASED:
		fwsupport = "Released";
		break;
	case PMCS_FW_TYPE_DEVELOPMENT:
		fwsupport = "Development";
		break;
	case PMCS_FW_TYPE_ALPHA:
		fwsupport = "Alpha";
		break;
	case PMCS_FW_TYPE_BETA:
		fwsupport = "Beta";
		break;
	default:
		fwsupport = "Special";
		break;
	}
	(void) snprintf(fw_rev, sizeof (fw_rev), "%x.%x.%x %s",
	    PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp),
	    fwsupport);
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_FWARE_VERSION,
	    fw_rev);

	/* SM-HBA */
	num_phys = pwp->nphy;
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_NUM_PHYS_HBA,
	    &num_phys);

	/* SM-HBA */
	protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT;
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SUPPORTED_PROTOCOL,
	    &protocol);

	return (DDI_SUCCESS);

failure:
	/* Common unwind path; pmcs_unattach() tears down whatever exists */
	if (pmcs_unattach(pwp)) {
		pwp->stuck = 1;
	}
	return (DDI_FAILURE);
}

/*
 * detach(9E) entry point for both HBA and iport nodes.  DDI_DETACH is
 * dispatched to pmcs_iport_unattach()/pmcs_unattach(); DDI_SUSPEND /
 * DDI_PM_SUSPEND quiesce the worker taskq and mark the HBA suspended.
 */
int
pmcs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int inst = ddi_get_instance(dip);
	pmcs_iport_t *iport = NULL;
	pmcs_hw_t *pwp = NULL;
	scsi_hba_tran_t *tran;

	if (scsi_hba_iport_unit_address(dip) != NULL) {
		/* iport node */
		iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
		ASSERT(iport);
		if (iport == NULL) {
			return (DDI_FAILURE);
		}
		pwp =
iport->pwp; 1087 } else { 1088 /* hba node */ 1089 pwp = (pmcs_hw_t *)ddi_get_soft_state(pmcs_softc_state, inst); 1090 ASSERT(pwp); 1091 if (pwp == NULL) { 1092 return (DDI_FAILURE); 1093 } 1094 } 1095 1096 switch (cmd) { 1097 case DDI_DETACH: 1098 if (iport) { 1099 /* iport detach */ 1100 if (pmcs_iport_unattach(iport)) { 1101 return (DDI_FAILURE); 1102 } 1103 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1104 "iport%d detached", inst); 1105 return (DDI_SUCCESS); 1106 } else { 1107 /* HBA detach */ 1108 if (pmcs_unattach(pwp)) { 1109 return (DDI_FAILURE); 1110 } 1111 return (DDI_SUCCESS); 1112 } 1113 1114 case DDI_SUSPEND: 1115 case DDI_PM_SUSPEND: 1116 /* No DDI_SUSPEND on iport nodes */ 1117 if (iport) { 1118 return (DDI_SUCCESS); 1119 } 1120 1121 if (pwp->stuck) { 1122 return (DDI_FAILURE); 1123 } 1124 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 1125 if (!tran) { 1126 return (DDI_FAILURE); 1127 } 1128 1129 pwp = TRAN2PMC(tran); 1130 if (pwp == NULL) { 1131 return (DDI_FAILURE); 1132 } 1133 mutex_enter(&pwp->lock); 1134 if (pwp->tq) { 1135 ddi_taskq_suspend(pwp->tq); 1136 } 1137 pwp->suspended = 1; 1138 mutex_exit(&pwp->lock); 1139 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "PMC8X6G suspending"); 1140 return (DDI_SUCCESS); 1141 1142 default: 1143 return (DDI_FAILURE); 1144 } 1145 } 1146 1147 static int 1148 pmcs_iport_unattach(pmcs_iport_t *iport) 1149 { 1150 pmcs_hw_t *pwp = iport->pwp; 1151 1152 /* 1153 * First, check if there are still any configured targets on this 1154 * iport. If so, we fail detach. 1155 */ 1156 if (pmcs_iport_has_targets(pwp, iport)) { 1157 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 1158 "iport%d detach failure: iport has targets (luns)", 1159 ddi_get_instance(iport->dip)); 1160 return (DDI_FAILURE); 1161 } 1162 1163 /* 1164 * Remove this iport from our list if it is inactive in the phymap. 
1165 */ 1166 rw_enter(&pwp->iports_lock, RW_WRITER); 1167 mutex_enter(&iport->lock); 1168 1169 if (iport->ua_state == UA_ACTIVE) { 1170 mutex_exit(&iport->lock); 1171 rw_exit(&pwp->iports_lock); 1172 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL, 1173 "iport%d detach failure: " 1174 "iport unit address active in phymap", 1175 ddi_get_instance(iport->dip)); 1176 return (DDI_FAILURE); 1177 } 1178 1179 /* If it's our only iport, clear iports_attached */ 1180 ASSERT(pwp->num_iports >= 1); 1181 if (--pwp->num_iports == 0) { 1182 pwp->iports_attached = 0; 1183 } 1184 1185 ASSERT(list_link_active(&iport->list_node)); 1186 list_remove(&pwp->iports, iport); 1187 rw_exit(&pwp->iports_lock); 1188 1189 /* 1190 * We have removed the iport handle from the HBA's iports list, 1191 * there will be no new references to it. Two things must be 1192 * guarded against here. First, we could have PHY up events, 1193 * adding themselves to the iport->phys list and grabbing ref's 1194 * on our iport handle. Second, we could have existing references 1195 * to this iport handle from a point in time prior to the list 1196 * removal above. 1197 * 1198 * So first, destroy the phys list. Remove any phys that have snuck 1199 * in after the phymap deactivate, dropping the refcnt accordingly. 1200 * If these PHYs are still up if and when the phymap reactivates 1201 * (i.e. when this iport reattaches), we'll populate the list with 1202 * them and bump the refcnt back up. 1203 */ 1204 pmcs_remove_phy_from_iport(iport, NULL); 1205 ASSERT(list_is_empty(&iport->phys)); 1206 list_destroy(&iport->phys); 1207 mutex_exit(&iport->lock); 1208 1209 /* 1210 * Second, wait for any other references to this iport to be 1211 * dropped, then continue teardown. 
1212 */ 1213 mutex_enter(&iport->refcnt_lock); 1214 while (iport->refcnt != 0) { 1215 cv_wait(&iport->refcnt_cv, &iport->refcnt_lock); 1216 } 1217 mutex_exit(&iport->refcnt_lock); 1218 1219 /* Delete kstats */ 1220 pmcs_destroy_phy_stats(iport); 1221 1222 /* Destroy the iport target map */ 1223 if (pmcs_iport_tgtmap_destroy(iport) == B_FALSE) { 1224 return (DDI_FAILURE); 1225 } 1226 1227 /* Free the tgt soft state */ 1228 if (iport->tgt_sstate != NULL) { 1229 ddi_soft_state_bystr_fini(&iport->tgt_sstate); 1230 } 1231 1232 /* Free our unit address string */ 1233 strfree(iport->ua); 1234 1235 /* Finish teardown and free the softstate */ 1236 mutex_destroy(&iport->refcnt_lock); 1237 mutex_destroy(&iport->smp_lock); 1238 ASSERT(iport->refcnt == 0); 1239 cv_destroy(&iport->refcnt_cv); 1240 cv_destroy(&iport->smp_cv); 1241 mutex_destroy(&iport->lock); 1242 ddi_soft_state_free(pmcs_iport_softstate, ddi_get_instance(iport->dip)); 1243 1244 return (DDI_SUCCESS); 1245 } 1246 1247 static int 1248 pmcs_unattach(pmcs_hw_t *pwp) 1249 { 1250 int i; 1251 enum pwpstate curstate; 1252 pmcs_cq_thr_info_t *cqti; 1253 1254 /* 1255 * Tear down the interrupt infrastructure. 1256 */ 1257 if (pmcs_teardown_intr(pwp)) { 1258 pwp->stuck = 1; 1259 } 1260 pwp->intr_cnt = 0; 1261 1262 /* 1263 * Grab a lock, if initted, to set state. 1264 */ 1265 if (pwp->locks_initted) { 1266 mutex_enter(&pwp->lock); 1267 if (pwp->state != STATE_DEAD) { 1268 pwp->state = STATE_UNPROBING; 1269 } 1270 curstate = pwp->state; 1271 mutex_exit(&pwp->lock); 1272 1273 /* 1274 * Stop the I/O completion threads. 
1275 */ 1276 mutex_enter(&pwp->cq_lock); 1277 pwp->cq_info.cq_stop = B_TRUE; 1278 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 1279 if (pwp->cq_info.cq_thr_info[i].cq_thread) { 1280 cqti = &pwp->cq_info.cq_thr_info[i]; 1281 mutex_enter(&cqti->cq_thr_lock); 1282 cv_signal(&cqti->cq_cv); 1283 mutex_exit(&cqti->cq_thr_lock); 1284 mutex_exit(&pwp->cq_lock); 1285 thread_join(cqti->cq_thread->t_did); 1286 mutex_enter(&pwp->cq_lock); 1287 } 1288 } 1289 mutex_exit(&pwp->cq_lock); 1290 1291 /* 1292 * Stop the interrupt coalescing timer thread 1293 */ 1294 if (pwp->ict_thread) { 1295 mutex_enter(&pwp->ict_lock); 1296 pwp->io_intr_coal.stop_thread = B_TRUE; 1297 cv_signal(&pwp->ict_cv); 1298 mutex_exit(&pwp->ict_lock); 1299 thread_join(pwp->ict_thread->t_did); 1300 } 1301 } else { 1302 if (pwp->state != STATE_DEAD) { 1303 pwp->state = STATE_UNPROBING; 1304 } 1305 curstate = pwp->state; 1306 } 1307 1308 if (&pwp->iports != NULL) { 1309 /* Destroy the iports lock */ 1310 rw_destroy(&pwp->iports_lock); 1311 /* Destroy the iports list */ 1312 ASSERT(list_is_empty(&pwp->iports)); 1313 list_destroy(&pwp->iports); 1314 } 1315 1316 if (pwp->hss_iportmap != NULL) { 1317 /* Destroy the iportmap */ 1318 scsi_hba_iportmap_destroy(pwp->hss_iportmap); 1319 } 1320 1321 if (pwp->hss_phymap != NULL) { 1322 /* Destroy the phymap */ 1323 sas_phymap_destroy(pwp->hss_phymap); 1324 } 1325 1326 /* 1327 * Make sure that any pending watchdog won't 1328 * be called from this point on out. 1329 */ 1330 (void) untimeout(pwp->wdhandle); 1331 /* 1332 * After the above action, the watchdog 1333 * timer that starts up the worker task 1334 * may trigger but will exit immediately 1335 * on triggering. 1336 * 1337 * Now that this is done, we can destroy 1338 * the task queue, which will wait if we're 1339 * running something on it. 
1340 */ 1341 if (pwp->tq) { 1342 ddi_taskq_destroy(pwp->tq); 1343 pwp->tq = NULL; 1344 } 1345 1346 pmcs_fm_fini(pwp); 1347 1348 if (pwp->hba_attached) { 1349 (void) scsi_hba_detach(pwp->dip); 1350 pwp->hba_attached = 0; 1351 } 1352 1353 /* 1354 * If the chip hasn't been marked dead, shut it down now 1355 * to bring it back to a known state without attempting 1356 * a soft reset. 1357 */ 1358 if (curstate != STATE_DEAD && pwp->locks_initted) { 1359 /* 1360 * De-register all registered devices 1361 */ 1362 pmcs_deregister_devices(pwp, pwp->root_phys); 1363 1364 /* 1365 * Stop all the phys. 1366 */ 1367 pmcs_stop_phys(pwp); 1368 1369 /* 1370 * Shut Down Message Passing 1371 */ 1372 (void) pmcs_stop_mpi(pwp); 1373 1374 /* 1375 * Reset chip 1376 */ 1377 (void) pmcs_soft_reset(pwp, B_FALSE); 1378 } 1379 1380 /* 1381 * Turn off interrupts on the chip 1382 */ 1383 if (pwp->mpi_acc_handle) { 1384 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1385 } 1386 1387 /* Destroy pwp's lock */ 1388 if (pwp->locks_initted) { 1389 mutex_destroy(&pwp->lock); 1390 mutex_destroy(&pwp->dma_lock); 1391 mutex_destroy(&pwp->axil_lock); 1392 mutex_destroy(&pwp->cq_lock); 1393 mutex_destroy(&pwp->config_lock); 1394 mutex_destroy(&pwp->ict_lock); 1395 mutex_destroy(&pwp->wfree_lock); 1396 mutex_destroy(&pwp->pfree_lock); 1397 mutex_destroy(&pwp->dead_phylist_lock); 1398 #ifdef DEBUG 1399 mutex_destroy(&pwp->dbglock); 1400 #endif 1401 cv_destroy(&pwp->ict_cv); 1402 cv_destroy(&pwp->drain_cv); 1403 pwp->locks_initted = 0; 1404 } 1405 1406 /* 1407 * Free DMA handles and associated consistent memory 1408 */ 1409 if (pwp->regdump_hndl) { 1410 if (ddi_dma_unbind_handle(pwp->regdump_hndl) != DDI_SUCCESS) { 1411 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1412 "Condition check failed " 1413 "at %s():%d", __func__, __LINE__); 1414 } 1415 ddi_dma_free_handle(&pwp->regdump_hndl); 1416 ddi_dma_mem_free(&pwp->regdump_acchdl); 1417 pwp->regdump_hndl = 0; 1418 } 1419 if (pwp->fwlog_hndl) { 1420 if 
(ddi_dma_unbind_handle(pwp->fwlog_hndl) != DDI_SUCCESS) { 1421 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1422 "Condition check failed " 1423 "at %s():%d", __func__, __LINE__); 1424 } 1425 ddi_dma_free_handle(&pwp->fwlog_hndl); 1426 ddi_dma_mem_free(&pwp->fwlog_acchdl); 1427 pwp->fwlog_hndl = 0; 1428 } 1429 if (pwp->cip_handles) { 1430 if (ddi_dma_unbind_handle(pwp->cip_handles) != DDI_SUCCESS) { 1431 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1432 "Condition check failed " 1433 "at %s():%d", __func__, __LINE__); 1434 } 1435 ddi_dma_free_handle(&pwp->cip_handles); 1436 ddi_dma_mem_free(&pwp->cip_acchdls); 1437 pwp->cip_handles = 0; 1438 } 1439 for (i = 0; i < PMCS_NOQ; i++) { 1440 if (pwp->oqp_handles[i]) { 1441 if (ddi_dma_unbind_handle(pwp->oqp_handles[i]) != 1442 DDI_SUCCESS) { 1443 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1444 "Condition check failed at %s():%d", 1445 __func__, __LINE__); 1446 } 1447 ddi_dma_free_handle(&pwp->oqp_handles[i]); 1448 ddi_dma_mem_free(&pwp->oqp_acchdls[i]); 1449 pwp->oqp_handles[i] = 0; 1450 } 1451 } 1452 for (i = 0; i < PMCS_NIQ; i++) { 1453 if (pwp->iqp_handles[i]) { 1454 if (ddi_dma_unbind_handle(pwp->iqp_handles[i]) != 1455 DDI_SUCCESS) { 1456 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1457 "Condition check failed at %s():%d", 1458 __func__, __LINE__); 1459 } 1460 ddi_dma_free_handle(&pwp->iqp_handles[i]); 1461 ddi_dma_mem_free(&pwp->iqp_acchdls[i]); 1462 pwp->iqp_handles[i] = 0; 1463 } 1464 } 1465 1466 pmcs_free_dma_chunklist(pwp); 1467 1468 /* 1469 * Unmap registers and destroy access handles 1470 */ 1471 if (pwp->mpi_acc_handle) { 1472 ddi_regs_map_free(&pwp->mpi_acc_handle); 1473 pwp->mpi_acc_handle = 0; 1474 } 1475 if (pwp->top_acc_handle) { 1476 ddi_regs_map_free(&pwp->top_acc_handle); 1477 pwp->top_acc_handle = 0; 1478 } 1479 if (pwp->gsm_acc_handle) { 1480 ddi_regs_map_free(&pwp->gsm_acc_handle); 1481 pwp->gsm_acc_handle = 0; 1482 } 1483 if (pwp->msg_acc_handle) { 1484 ddi_regs_map_free(&pwp->msg_acc_handle); 1485 
pwp->msg_acc_handle = 0; 1486 } 1487 if (pwp->pci_acc_handle) { 1488 pci_config_teardown(&pwp->pci_acc_handle); 1489 pwp->pci_acc_handle = 0; 1490 } 1491 1492 /* 1493 * Do memory allocation cleanup. 1494 */ 1495 while (pwp->dma_freelist) { 1496 pmcs_dmachunk_t *this = pwp->dma_freelist; 1497 pwp->dma_freelist = this->nxt; 1498 kmem_free(this, sizeof (pmcs_dmachunk_t)); 1499 } 1500 1501 /* 1502 * Free pools 1503 */ 1504 if (pwp->iocomp_cb_cache) { 1505 kmem_cache_destroy(pwp->iocomp_cb_cache); 1506 } 1507 1508 /* 1509 * Free all PHYs (at level > 0), then free the cache 1510 */ 1511 pmcs_free_all_phys(pwp, pwp->root_phys); 1512 if (pwp->phy_cache) { 1513 kmem_cache_destroy(pwp->phy_cache); 1514 } 1515 1516 /* 1517 * Free root PHYs 1518 */ 1519 if (pwp->root_phys) { 1520 pmcs_phy_t *phyp = pwp->root_phys; 1521 for (i = 0; i < pwp->nphy; i++) { 1522 mutex_destroy(&phyp->phy_lock); 1523 phyp = phyp->sibling; 1524 } 1525 kmem_free(pwp->root_phys, pwp->nphy * sizeof (pmcs_phy_t)); 1526 pwp->root_phys = NULL; 1527 pwp->nphy = 0; 1528 } 1529 1530 /* Free the targets list */ 1531 if (pwp->targets) { 1532 kmem_free(pwp->targets, 1533 sizeof (pmcs_xscsi_t *) * pwp->max_dev); 1534 } 1535 1536 /* 1537 * Free work structures 1538 */ 1539 1540 if (pwp->work && pwp->max_cmd) { 1541 for (i = 0; i < pwp->max_cmd - 1; i++) { 1542 pmcwork_t *pwrk = &pwp->work[i]; 1543 mutex_destroy(&pwrk->lock); 1544 cv_destroy(&pwrk->sleep_cv); 1545 } 1546 kmem_free(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd); 1547 pwp->work = NULL; 1548 pwp->max_cmd = 0; 1549 } 1550 1551 /* 1552 * Do last property and SCSA cleanup 1553 */ 1554 if (pwp->tran) { 1555 scsi_hba_tran_free(pwp->tran); 1556 pwp->tran = NULL; 1557 } 1558 if (pwp->reset_notify_listf) { 1559 scsi_hba_reset_notify_tear_down(pwp->reset_notify_listf); 1560 pwp->reset_notify_listf = NULL; 1561 } 1562 ddi_prop_remove_all(pwp->dip); 1563 if (pwp->stuck) { 1564 return (-1); 1565 } 1566 1567 /* Free register dump area if allocated */ 1568 if 
(pwp->regdumpp) {
		kmem_free(pwp->regdumpp, PMCS_REG_DUMP_SIZE);
		pwp->regdumpp = NULL;
	}
	/* Free the IQ posting trace buffer and its descriptor */
	if (pwp->iqpt && pwp->iqpt->head) {
		kmem_free(pwp->iqpt->head, PMCS_IQP_TRACE_BUFFER_SIZE);
		pwp->iqpt->head = pwp->iqpt->curpos = NULL;
	}
	if (pwp->iqpt) {
		kmem_free(pwp->iqpt, sizeof (pmcs_iqp_trace_t));
		pwp->iqpt = NULL;
	}

	ddi_soft_state_free(pmcs_softc_state, ddi_get_instance(pwp->dip));
	return (0);
}

/*
 * quiesce (9E) entry point
 *
 * This function is called when the system is single-threaded at high PIL
 * with preemption disabled. Therefore, the function must not block/wait/sleep.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 *
 */
static int
pmcs_quiesce(dev_info_t *dip)
{
	pmcs_hw_t	*pwp;
	scsi_hba_tran_t	*tran;

	/* Nothing to do if we never attached a tran to this node */
	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_SUCCESS);

	/* No quiesce necessary on a per-iport basis */
	if (scsi_hba_iport_unit_address(dip) != NULL) {
		return (DDI_SUCCESS);
	}

	if ((pwp = TRAN2PMC(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Stop MPI & Reset chip (no need to re-initialize) */
	(void) pmcs_stop_mpi(pwp);
	(void) pmcs_soft_reset(pwp, B_TRUE);

	return (DDI_SUCCESS);
}

/*
 * Called with xp->statlock and PHY lock and scratch acquired.
 *
 * Issues an ATA IDENTIFY to a direct-attached SATA device (via the HBA
 * scratch area) and, if the device reports a valid NAA=5 world-wide name
 * in words 108-111, overrides the PHY's SAS address with it.
 * Returns 0 on success or the nonzero result of pmcs_sata_identify().
 */
static int
pmcs_add_sata_device(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
	ata_identify_t *ati;
	int result, i;
	pmcs_phy_t *pptr;
	uint16_t *a;
	union {
		uint8_t nsa[8];
		uint16_t nsb[4];
	} u;

	/*
	 * Safe defaults - use only if this target is brand new (i.e. doesn't
	 * already have these settings configured)
	 */
	if (xp->capacity == 0) {
		xp->capacity = (uint64_t)-1;
		xp->ca = 1;
		xp->qdepth = 1;
		xp->pio = 1;
	}

	pptr = xp->phy;

	/*
	 * We only try and issue an IDENTIFY for first level
	 * (direct attached) devices. We don't try and
	 * set other quirks here (this will happen later,
	 * if the device is fully configured)
	 */
	if (pptr->level) {
		return (0);
	}

	/*
	 * statlock is dropped and re-acquired around the IDENTIFY;
	 * target state may change while it is released.
	 */
	mutex_exit(&xp->statlock);
	result = pmcs_sata_identify(pwp, pptr);
	mutex_enter(&xp->statlock);

	if (result) {
		return (result);
	}
	/* IDENTIFY data lands in scratch; byte-swap words 108-111 */
	ati = pwp->scratch;
	a = &ati->word108;
	for (i = 0; i < 4; i++) {
		u.nsb[i] = ddi_swap16(*a++);
	}

	/*
	 * Check the returned data for being a valid (NAA=5) WWN.
	 * If so, use that and override the SAS address we were
	 * given at Link Up time.
	 */
	if ((u.nsa[0] >> 4) == 5) {
		(void) memcpy(pptr->sas_address, u.nsa, 8);
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
	    "%s: %s has SAS ADDRESS " SAS_ADDR_FMT,
	    __func__, pptr->path, SAS_ADDR_PRT(pptr->sas_address));
	return (0);
}

/*
 * Called with PHY lock and target statlock held and scratch acquired
 *
 * Finish configuration of a newly discovered target: per-dtype setup
 * (IDENTIFY for direct-attached SATA, queue depth for SAS/expander),
 * then mark the target operational.  Returns B_FALSE only if the SATA
 * IDENTIFY step fails.
 */
static boolean_t
pmcs_add_new_device(pmcs_hw_t *pwp, pmcs_xscsi_t *target)
{
	ASSERT(target != NULL);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target, "%s: target = 0x%p",
	    __func__, (void *) target);

	/*
	 * NOTE(review): no default case — dtype values other than
	 * SATA/SAS/EXPANDER fall through with qdepth left unchanged;
	 * confirm that is intentional.
	 */
	switch (target->phy->dtype) {
	case SATA:
		if (pmcs_add_sata_device(pwp, target) != 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, target->phy,
			    target, "%s: add_sata_device failed for tgt 0x%p",
			    __func__, (void *) target);
			return (B_FALSE);
		}
		break;
	case SAS:
		target->qdepth = maxqdepth;
		break;
	case EXPANDER:
		target->qdepth = 1;
		break;
	}

	target->new = 0;
	target->assigned = 1;
	target->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
	target->dtype = target->phy->dtype;

	/*
	 * Set the PHY's config stop time to 0. This is one of the final
	 * stops along the config path, so we're indicating that we
	 * successfully configured the PHY.
	 */
	target->phy->config_stop = 0;

	return (B_TRUE);
}

/*
 * Deferred-work dispatcher, run on the driver taskq (dispatched from
 * pmcs_watchdog via ddi_taskq_dispatch).  Atomically swaps out the
 * pending work-flag word and services each flagged item in turn.
 */
void
pmcs_worker(void *arg)
{
	pmcs_hw_t *pwp = arg;
	ulong_t work_flags;

	DTRACE_PROBE2(pmcs__worker, ulong_t, pwp->work_flags, boolean_t,
	    pwp->config_changed);

	if (pwp->state != STATE_RUNNING) {
		return;
	}

	/* Claim all currently pending work in one atomic swap */
	work_flags = atomic_swap_ulong(&pwp->work_flags, 0);

	if (work_flags & PMCS_WORK_FLAG_SAS_HW_ACK) {
		pmcs_ack_events(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_SPINUP_RELEASE) {
		mutex_enter(&pwp->lock);
		pmcs_spinup_release(pwp, NULL);
		mutex_exit(&pwp->lock);
	}

	if (work_flags & PMCS_WORK_FLAG_SSP_EVT_RECOVERY) {
		pmcs_ssp_event_recovery(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_DS_ERR_RECOVERY) {
		pmcs_dev_state_recovery(pwp, NULL);
	}

	if (work_flags & PMCS_WORK_FLAG_DEREGISTER_DEV) {
		pmcs_deregister_device_work(pwp, NULL);
	}

	if (work_flags & PMCS_WORK_FLAG_DISCOVER) {
		pmcs_discover(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_ABORT_HANDLE) {
		/* Reschedule if the abort handler has more to do */
		if (pmcs_abort_handler(pwp)) {
			SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		}
	}

	if (work_flags & PMCS_WORK_FLAG_SATA_RUN) {
		pmcs_sata_work(pwp);
	}

	if (work_flags & PMCS_WORK_FLAG_RUN_QUEUES) {
		pmcs_scsa_wq_run(pwp);
		mutex_enter(&pwp->lock);
		PMCS_CQ_RUN(pwp);
		mutex_exit(&pwp->lock);
	}

	if (work_flags & PMCS_WORK_FLAG_ADD_DMA_CHUNKS) {
		/* On failure, retry the allocation on the next pass */
		if (pmcs_add_more_chunks(pwp,
		    ptob(1) * PMCS_ADDTL_CHUNK_PAGES)) {
			SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
		} else {
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		}
	}
}

/*
 * Allocate an additional pool of DMA SGL chunks of total size "nsize"
 * and hand it to the chunk list under dma_lock.  Returns 0 on success,
 * -1 on allocation/FM failure.
 */
static int
pmcs_add_more_chunks(pmcs_hw_t *pwp, unsigned long nsize)
{
	pmcs_dmachunk_t *dc;
	unsigned long dl;
	pmcs_chunk_t	*pchunk = NULL;

	pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t);

	/*
	 * NOTE(review): kmem_zalloc with KM_SLEEP cannot return NULL,
	 * so this check is dead code (harmless).
	 */
	pchunk = kmem_zalloc(sizeof (pmcs_chunk_t), KM_SLEEP);
	if (pchunk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Not enough memory for DMA chunks");
		return (-1);
	}

	if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pchunk->acc_handle,
	    &pchunk->dma_handle, nsize, (caddr_t *)&pchunk->addrp,
	    &pchunk->dma_addr) == B_FALSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to setup DMA for chunks");
		kmem_free(pchunk, sizeof (pmcs_chunk_t));
		return (-1);
	}

	/*
	 * NOTE(review): this failure path returns without releasing
	 * pchunk or the DMA resources just set up above — apparent
	 * leak; confirm against pmcs_dma_setup()'s teardown contract.
	 */
	if ((pmcs_check_acc_handle(pchunk->acc_handle) != DDI_SUCCESS) ||
	    (pmcs_check_dma_handle(pchunk->dma_handle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
		return (-1);
	}

	bzero(pchunk->addrp, nsize);
	dc = NULL;
	/* Build a linked list of per-SGL chunk descriptors */
	for (dl = 0; dl < (nsize / PMCS_SGL_CHUNKSZ); dl++) {
		pmcs_dmachunk_t *tmp;
		tmp = kmem_alloc(sizeof (pmcs_dmachunk_t), KM_SLEEP);
		tmp->nxt = dc;
		dc = tmp;
	}
	mutex_enter(&pwp->dma_lock);
	pmcs_idma_chunks(pwp, dc, pchunk, nsize);
	pwp->nchunks++;
	mutex_exit(&pwp->dma_lock);
	return (0);
}


/*
 * Walk the entire work-structure array looking for active commands whose
 * timers have expired, mark them timed out, and attempt recovery (abort)
 * for SCSA commands.  Runs from pmcs_watchdog(), outside pwp->lock.
 */
static void
pmcs_check_commands(pmcs_hw_t *pwp)
{
	pmcs_cmd_t *sp;
	size_t amt;
	char path[32];
	pmcwork_t *pwrk;
	pmcs_xscsi_t *target;
	pmcs_phy_t *phyp;
	int rval;

	for (pwrk = pwp->work; pwrk < &pwp->work[pwp->max_cmd]; pwrk++) {
		mutex_enter(&pwrk->lock);

		/*
		 * If the command isn't active, we can't be timing it still.
		 * Active means the tag is not free and the state is "on chip".
		 */
		if (!PMCS_COMMAND_ACTIVE(pwrk)) {
			mutex_exit(&pwrk->lock);
			continue;
		}

		/*
		 * No timer active for this command.
		 */
		if (pwrk->timer == 0) {
			mutex_exit(&pwrk->lock);
			continue;
		}

		/*
		 * Knock off bits for the time interval.
		 */
		if (pwrk->timer >= US2WT(PMCS_WATCH_INTERVAL)) {
			pwrk->timer -= US2WT(PMCS_WATCH_INTERVAL);
		} else {
			pwrk->timer = 0;
		}
		if (pwrk->timer > 0) {
			mutex_exit(&pwrk->lock);
			continue;
		}

		/*
		 * The command has now officially timed out.
		 * Get the path for it. If it doesn't have
		 * a phy pointer any more, it's really dead
		 * and can just be put back on the free list.
		 * There should *not* be any commands associated
		 * with it any more.
		 */
		if (pwrk->phy == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "dead command with gone phy being recycled");
			ASSERT(pwrk->xp == NULL);
			pmcs_pwork(pwp, pwrk);
			continue;
		}
		/* Snapshot the PHY path before we start dropping locks */
		amt = sizeof (path);
		amt = min(sizeof (pwrk->phy->path), amt);
		(void) memcpy(path, pwrk->phy->path, amt);

		/*
		 * If this is a non-SCSA command, stop here. Eventually
		 * we might do something with non-SCSA commands here-
		 * but so far their timeout mechanisms are handled in
		 * the WAIT_FOR macro.
		 */
		if (pwrk->xp == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: non-SCSA cmd tag 0x%x timed out",
			    path, pwrk->htag);
			mutex_exit(&pwrk->lock);
			continue;
		}

		sp = pwrk->arg;
		ASSERT(sp != NULL);

		/*
		 * Mark it as timed out.
		 */
		CMD2PKT(sp)->pkt_reason = CMD_TIMEOUT;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
#ifdef	DEBUG
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp,
		    "%s: SCSA cmd tag 0x%x timed out (state %x) onwire=%d",
		    path, pwrk->htag, pwrk->state, pwrk->onwire);
#else
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp,
		    "%s: SCSA cmd tag 0x%x timed out (state %x)",
		    path, pwrk->htag, pwrk->state);
#endif
		/*
		 * Mark the work structure as timed out.
		 */
		pwrk->state = PMCS_WORK_STATE_TIMED_OUT;
		phyp = pwrk->phy;
		target = pwrk->xp;
		/* Drop the work lock before taking phy/target locks */
		mutex_exit(&pwrk->lock);

		pmcs_lock_phy(phyp);
		mutex_enter(&target->statlock);

		/*
		 * No point attempting recovery if the device is gone
		 */
		if (target->dev_gone) {
			mutex_exit(&target->statlock);
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
			    "%s: tgt(0x%p) is gone. Returning CMD_DEV_GONE "
			    "for htag 0x%08x", __func__,
			    (void *)target, pwrk->htag);
			mutex_enter(&pwrk->lock);
			if (!PMCS_COMMAND_DONE(pwrk)) {
				/* Complete this command here */
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
				    "%s: Completing cmd (htag 0x%08x) "
				    "anyway", __func__, pwrk->htag);
				pwrk->dead = 1;
				CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
				CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
				pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
			} else {
				mutex_exit(&pwrk->lock);
			}
			continue;
		}

		mutex_exit(&target->statlock);
		/* Try to abort the timed-out command on the chip */
		rval = pmcs_abort(pwp, phyp, pwrk->htag, 0, 1);
		if (rval) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
			    "%s: Bad status (%d) on abort of HTAG 0x%08x",
			    __func__, rval, pwrk->htag);
			pmcs_unlock_phy(phyp);
			mutex_enter(&pwrk->lock);
			if (!PMCS_COMMAND_DONE(pwrk)) {
				/* Complete this command here */
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
				    "%s: Completing cmd (htag 0x%08x) "
"anyway", __func__, pwrk->htag); 1984 if (target->dev_gone) { 1985 pwrk->dead = 1; 1986 CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE; 1987 CMD2PKT(sp)->pkt_state = STATE_GOT_BUS; 1988 } 1989 pmcs_complete_work_impl(pwp, pwrk, NULL, 0); 1990 } else { 1991 mutex_exit(&pwrk->lock); 1992 } 1993 pmcs_lock_phy(phyp); 1994 /* 1995 * No need to reschedule ABORT if we get any other 1996 * status 1997 */ 1998 if (rval == ENOMEM) { 1999 phyp->abort_sent = 0; 2000 phyp->abort_pending = 1; 2001 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 2002 } 2003 } 2004 pmcs_unlock_phy(phyp); 2005 } 2006 /* 2007 * Run any completions that may have been queued up. 2008 */ 2009 PMCS_CQ_RUN(pwp); 2010 } 2011 2012 static void 2013 pmcs_watchdog(void *arg) 2014 { 2015 pmcs_hw_t *pwp = arg; 2016 2017 DTRACE_PROBE2(pmcs__watchdog, ulong_t, pwp->work_flags, boolean_t, 2018 pwp->config_changed); 2019 2020 mutex_enter(&pwp->lock); 2021 2022 if (pwp->state != STATE_RUNNING) { 2023 mutex_exit(&pwp->lock); 2024 return; 2025 } 2026 2027 if (atomic_cas_ulong(&pwp->work_flags, 0, 0) != 0) { 2028 if (ddi_taskq_dispatch(pwp->tq, pmcs_worker, pwp, 2029 DDI_NOSLEEP) != DDI_SUCCESS) { 2030 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2031 "Could not dispatch to worker thread"); 2032 } 2033 } 2034 pwp->wdhandle = timeout(pmcs_watchdog, pwp, 2035 drv_usectohz(PMCS_WATCH_INTERVAL)); 2036 mutex_exit(&pwp->lock); 2037 pmcs_check_commands(pwp); 2038 pmcs_handle_dead_phys(pwp); 2039 } 2040 2041 static int 2042 pmcs_remove_ihandlers(pmcs_hw_t *pwp, int icnt) 2043 { 2044 int i, r, rslt = 0; 2045 for (i = 0; i < icnt; i++) { 2046 r = ddi_intr_remove_handler(pwp->ih_table[i]); 2047 if (r == DDI_SUCCESS) { 2048 continue; 2049 } 2050 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2051 "%s: unable to remove interrupt handler %d", __func__, i); 2052 rslt = -1; 2053 break; 2054 } 2055 return (rslt); 2056 } 2057 2058 static int 2059 pmcs_disable_intrs(pmcs_hw_t *pwp, int icnt) 2060 { 2061 if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) { 2062 int r 
= ddi_intr_block_disable(&pwp->ih_table[0], 2063 pwp->intr_cnt); 2064 if (r != DDI_SUCCESS) { 2065 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2066 "unable to disable interrupt block"); 2067 return (-1); 2068 } 2069 } else { 2070 int i; 2071 for (i = 0; i < icnt; i++) { 2072 if (ddi_intr_disable(pwp->ih_table[i]) == DDI_SUCCESS) { 2073 continue; 2074 } 2075 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2076 "unable to disable interrupt %d", i); 2077 return (-1); 2078 } 2079 } 2080 return (0); 2081 } 2082 2083 static int 2084 pmcs_free_intrs(pmcs_hw_t *pwp, int icnt) 2085 { 2086 int i; 2087 for (i = 0; i < icnt; i++) { 2088 if (ddi_intr_free(pwp->ih_table[i]) == DDI_SUCCESS) { 2089 continue; 2090 } 2091 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2092 "unable to free interrupt %d", i); 2093 return (-1); 2094 } 2095 kmem_free(pwp->ih_table, pwp->ih_table_size); 2096 pwp->ih_table_size = 0; 2097 return (0); 2098 } 2099 2100 /* 2101 * Try to set up interrupts of type "type" with a minimum number of interrupts 2102 * of "min". 
 */
static void
pmcs_setup_intr_impl(pmcs_hw_t *pwp, int type, int min)
{
	int rval, avail, count, actual, max;

	rval = ddi_intr_get_nintrs(pwp->dip, type, &count);
	if ((rval != DDI_SUCCESS) || (count < min)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: get_nintrs failed; type: %d rc: %d count: %d min: %d",
		    __func__, type, rval, count, min);
		return;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
	    "%s: nintrs = %d for type: %d", __func__, count, type);

	rval = ddi_intr_get_navail(pwp->dip, type, &avail);
	if ((rval != DDI_SUCCESS) || (avail < min)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: get_navail failed; type: %d rc: %d avail: %d min: %d",
		    __func__, type, rval, avail, min);
		return;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
	    "%s: navail = %d for type: %d", __func__, avail, type);

	/* Handle table sized for everything available, even if we use less */
	pwp->ih_table_size = avail * sizeof (ddi_intr_handle_t);
	pwp->ih_table = kmem_alloc(pwp->ih_table_size, KM_SLEEP);

	switch (type) {
	case DDI_INTR_TYPE_MSIX:
		pwp->int_type = PMCS_INT_MSIX;
		max = PMCS_MAX_MSIX;
		break;
	case DDI_INTR_TYPE_MSI:
		pwp->int_type = PMCS_INT_MSI;
		max = PMCS_MAX_MSI;
		break;
	case DDI_INTR_TYPE_FIXED:
	default:
		pwp->int_type = PMCS_INT_FIXED;
		max = PMCS_MAX_FIXED;
		break;
	}

	rval = ddi_intr_alloc(pwp->dip, pwp->ih_table, type, 0, max, &actual,
	    DDI_INTR_ALLOC_NORMAL);
	if (rval != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: ddi_intr_alloc failed; type: %d rc: %d",
		    __func__, type, rval);
		/* Roll back everything so the caller can try another type */
		kmem_free(pwp->ih_table, pwp->ih_table_size);
		pwp->ih_table = NULL;
		pwp->ih_table_size = 0;
		pwp->intr_cnt = 0;
		pwp->int_type = PMCS_INT_NONE;
		return;
	}

	pwp->intr_cnt = actual;
}

/*
 * Set up interrupts.
 * We return one of three values:
 *
 *	0 - success
 *	EAGAIN - failure to set up interrupts
 *	EIO - "" + we're now stuck partly enabled
 *
 * If EIO is returned, we can't unload the driver.
 */
static int
pmcs_setup_intr(pmcs_hw_t *pwp)
{
	int i, r, itypes, oqv_count;
	ddi_intr_handler_t **iv_table;
	size_t iv_table_size;
	uint_t pri;

	if (ddi_intr_get_supported_types(pwp->dip, &itypes) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "cannot get interrupt types");
		return (EAGAIN);
	}

	/* Honor the tunables that force-disable MSI-X/MSI */
	if (disable_msix) {
		itypes &= ~DDI_INTR_TYPE_MSIX;
	}
	if (disable_msi) {
		itypes &= ~DDI_INTR_TYPE_MSI;
	}

	/*
	 * We won't know what firmware we're running until we call pmcs_setup,
	 * and we can't call pmcs_setup until we establish interrupts.
	 */

	pwp->int_type = PMCS_INT_NONE;

	/*
	 * We want PMCS_MAX_MSIX vectors for MSI-X. Anything less would be
	 * uncivilized.
	 */
	if (itypes & DDI_INTR_TYPE_MSIX) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSIX, PMCS_MAX_MSIX);
		if (pwp->int_type == PMCS_INT_MSIX) {
			itypes = 0;
		}
	}

	if (itypes & DDI_INTR_TYPE_MSI) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSI, 1);
		if (pwp->int_type == PMCS_INT_MSI) {
			itypes = 0;
		}
	}

	if (itypes & DDI_INTR_TYPE_FIXED) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_FIXED, 1);
		if (pwp->int_type == PMCS_INT_FIXED) {
			itypes = 0;
		}
	}

	if (pwp->intr_cnt == 0) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "No interrupts available");
		return (EAGAIN);
	}

	iv_table_size = sizeof (ddi_intr_handler_t *) * pwp->intr_cnt;
	iv_table = kmem_alloc(iv_table_size, KM_SLEEP);

	/*
	 * Get iblock cookie and add handlers.
	 * Only 1, 2 or 4 vectors are supported configurations; the
	 * 4-vector layout maps one handler per MSI-X queue.
	 */
	switch (pwp->intr_cnt) {
	case 1:
		iv_table[0] = pmcs_all_intr;
		break;
	case 2:
		iv_table[0] = pmcs_iodone_ix;
		iv_table[1] = pmcs_nonio_ix;
		break;
	case 4:
		iv_table[PMCS_MSIX_GENERAL] = pmcs_general_ix;
		iv_table[PMCS_MSIX_IODONE] = pmcs_iodone_ix;
		iv_table[PMCS_MSIX_EVENTS] = pmcs_event_ix;
		iv_table[PMCS_MSIX_FATAL] = pmcs_fatal_ix;
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: intr_cnt = %d - unexpected", __func__, pwp->intr_cnt);
		kmem_free(iv_table, iv_table_size);
		return (EAGAIN);
	}

	for (i = 0; i < pwp->intr_cnt; i++) {
		r = ddi_intr_add_handler(pwp->ih_table[i], iv_table[i],
		    (caddr_t)pwp, NULL);
		if (r != DDI_SUCCESS) {
			/* Unwind the i handlers added so far, then free */
			kmem_free(iv_table, iv_table_size);
			if (pmcs_remove_ihandlers(pwp, i)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, i)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	kmem_free(iv_table, iv_table_size);

	if (ddi_intr_get_cap(pwp->ih_table[0], &pwp->intr_cap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get int capabilities");
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		r = ddi_intr_block_enable(&pwp->ih_table[0], pwp->intr_cnt);
		if (r != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "intr blk enable failed");
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EFAULT);
		}
	} else {
		for (i = 0; i < pwp->intr_cnt; i++) {
			r = ddi_intr_enable(pwp->ih_table[i]);
			if (r == DDI_SUCCESS) {
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "unable to enable interrupt %d", i);
			/* Disable the i vectors already enabled, then free */
			if (pmcs_disable_intrs(pwp, i)) {
				return (EIO);
			}
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	/*
	 * Set up locks.
	 */
	if (ddi_intr_get_pri(pwp->ih_table[0], &pri) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get interrupt priority");
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	/* All driver locks are initialized at the interrupt priority */
	pwp->locks_initted = 1;
	pwp->intr_pri = pri;
	mutex_init(&pwp->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dma_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->axil_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->cq_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->ict_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->config_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->wfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->pfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dead_phylist_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));
#ifdef	DEBUG
	mutex_init(&pwp->dbglock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
#endif
	cv_init(&pwp->ict_cv, NULL, CV_DRIVER, NULL);
	cv_init(&pwp->drain_cv, NULL, CV_DRIVER, NULL);
	for (i = 0; i < PMCS_NIQ; i++) {
		mutex_init(&pwp->iqp_lock[i], NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
	}
	for (i = 0; i < pwp->cq_info.cq_threads; i++) {
		mutex_init(&pwp->cq_info.cq_thr_info[i].cq_thr_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
		cv_init(&pwp->cq_info.cq_thr_info[i].cq_cv, NULL,
		    CV_DRIVER, NULL);
	}

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%d %s interrup%s configured",
	    pwp->intr_cnt, (pwp->int_type == PMCS_INT_MSIX)? "MSI-X" :
	    ((pwp->int_type == PMCS_INT_MSI)? "MSI" : "INT-X"),
	    pwp->intr_cnt == 1? "t" : "ts");


	/*
	 * Enable Interrupts
	 *
	 * The loop below reuses "pri" as a scratch mask: starting from all
	 * ones, it clears the low oqv_count bits, producing the outbound
	 * doorbell mask (0 bits = enabled queues).  oqv_count is
	 * max(intr_cnt, PMCS_NOQ).
	 */
	if (pwp->intr_cnt > PMCS_NOQ) {
		oqv_count = pwp->intr_cnt;
	} else {
		oqv_count = PMCS_NOQ;
	}
	for (pri = 0xffffffff, i = 0; i < oqv_count; i++) {
		pri ^= (1 << i);
	}

	mutex_enter(&pwp->lock);
	pwp->intr_mask = pri;
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
	mutex_exit(&pwp->lock);

	return (0);
}

/*
 * Undo pmcs_setup_intr(): disable, remove and free all vectors.
 * Returns 0 on success, EIO if any teardown step fails (driver is then
 * considered stuck by the caller).
 */
static int
pmcs_teardown_intr(pmcs_hw_t *pwp)
{
	if (pwp->intr_cnt) {
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
	}
	return (0);
}

/* MSI-X vector: general (non-I/O, non-event) interrupts */
static uint_t
pmcs_general_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_general_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/* MSI-X vector: SAS/SATA event interrupts */
static uint_t
pmcs_event_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_event_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/* MSI-X vector: I/O completion interrupts */
static uint_t
pmcs_iodone_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);

	/*
	 * It's possible that if we just turned interrupt
coalescing off 2448 * (and thus, re-enabled auto clear for interrupts on the I/O outbound 2449 * queue) that there was an interrupt already pending. We use 2450 * io_intr_coal.int_cleared to ensure that we still drop in here and 2451 * clear the appropriate interrupt bit one last time. 2452 */ 2453 mutex_enter(&pwp->ict_lock); 2454 if (pwp->io_intr_coal.timer_on || 2455 (pwp->io_intr_coal.int_cleared == B_FALSE)) { 2456 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 2457 (1 << PMCS_OQ_IODONE)); 2458 pwp->io_intr_coal.int_cleared = B_TRUE; 2459 } 2460 mutex_exit(&pwp->ict_lock); 2461 2462 pmcs_iodone_intr(pwp); 2463 2464 return (DDI_INTR_CLAIMED); 2465 } 2466 2467 static uint_t 2468 pmcs_fatal_ix(caddr_t arg1, caddr_t arg2) 2469 { 2470 pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1); 2471 _NOTE(ARGUNUSED(arg2)); 2472 pmcs_fatal_handler(pwp); 2473 return (DDI_INTR_CLAIMED); 2474 } 2475 2476 static uint_t 2477 pmcs_nonio_ix(caddr_t arg1, caddr_t arg2) 2478 { 2479 _NOTE(ARGUNUSED(arg2)); 2480 pmcs_hw_t *pwp = (void *)arg1; 2481 uint32_t obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB); 2482 2483 /* 2484 * Check for Fatal Interrupts 2485 */ 2486 if (obdb & (1 << PMCS_FATAL_INTERRUPT)) { 2487 pmcs_fatal_handler(pwp); 2488 return (DDI_INTR_CLAIMED); 2489 } 2490 2491 if (obdb & (1 << PMCS_OQ_GENERAL)) { 2492 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 2493 (1 << PMCS_OQ_GENERAL)); 2494 pmcs_general_intr(pwp); 2495 pmcs_event_intr(pwp); 2496 } 2497 2498 return (DDI_INTR_CLAIMED); 2499 } 2500 2501 static uint_t 2502 pmcs_all_intr(caddr_t arg1, caddr_t arg2) 2503 { 2504 _NOTE(ARGUNUSED(arg2)); 2505 pmcs_hw_t *pwp = (void *) arg1; 2506 uint32_t obdb; 2507 int handled = 0; 2508 2509 obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB); 2510 2511 /* 2512 * Check for Fatal Interrupts 2513 */ 2514 if (obdb & (1 << PMCS_FATAL_INTERRUPT)) { 2515 pmcs_fatal_handler(pwp); 2516 return (DDI_INTR_CLAIMED); 2517 } 2518 2519 /* 2520 * Check for Outbound Queue service needed 2521 */ 2522 if (obdb & (1 << 
PMCS_OQ_IODONE)) { 2523 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 2524 (1 << PMCS_OQ_IODONE)); 2525 obdb ^= (1 << PMCS_OQ_IODONE); 2526 handled++; 2527 pmcs_iodone_intr(pwp); 2528 } 2529 if (obdb & (1 << PMCS_OQ_GENERAL)) { 2530 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 2531 (1 << PMCS_OQ_GENERAL)); 2532 obdb ^= (1 << PMCS_OQ_GENERAL); 2533 handled++; 2534 pmcs_general_intr(pwp); 2535 } 2536 if (obdb & (1 << PMCS_OQ_EVENTS)) { 2537 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 2538 (1 << PMCS_OQ_EVENTS)); 2539 obdb ^= (1 << PMCS_OQ_EVENTS); 2540 handled++; 2541 pmcs_event_intr(pwp); 2542 } 2543 if (obdb) { 2544 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2545 "interrupt bits not handled (0x%x)", obdb); 2546 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, obdb); 2547 handled++; 2548 } 2549 if (pwp->int_type == PMCS_INT_MSI) { 2550 handled++; 2551 } 2552 return (handled? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 2553 } 2554 2555 void 2556 pmcs_fatal_handler(pmcs_hw_t *pwp) 2557 { 2558 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, "Fatal Interrupt caught"); 2559 mutex_enter(&pwp->lock); 2560 pwp->state = STATE_DEAD; 2561 pmcs_register_dump_int(pwp); 2562 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 2563 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff); 2564 mutex_exit(&pwp->lock); 2565 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE); 2566 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST); 2567 2568 #ifdef DEBUG 2569 cmn_err(CE_PANIC, "PMCS Fatal Firmware Error"); 2570 #endif 2571 } 2572 2573 /* 2574 * Called with PHY lock and target statlock held and scratch acquired. 
2575 */ 2576 boolean_t 2577 pmcs_assign_device(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt) 2578 { 2579 pmcs_phy_t *pptr = tgt->phy; 2580 2581 switch (pptr->dtype) { 2582 case SAS: 2583 case EXPANDER: 2584 break; 2585 case SATA: 2586 tgt->ca = 1; 2587 break; 2588 default: 2589 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2590 "%s: Target %p has PHY %p with invalid dtype", 2591 __func__, (void *)tgt, (void *)pptr); 2592 return (B_FALSE); 2593 } 2594 2595 tgt->new = 1; 2596 tgt->dev_gone = 0; 2597 tgt->recover_wait = 0; 2598 2599 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2600 "%s: config %s vtgt %u for " SAS_ADDR_FMT, __func__, 2601 pptr->path, tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2602 2603 if (pmcs_add_new_device(pwp, tgt) != B_TRUE) { 2604 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2605 "%s: Failed for vtgt %u / WWN " SAS_ADDR_FMT, __func__, 2606 tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2607 mutex_destroy(&tgt->statlock); 2608 mutex_destroy(&tgt->wqlock); 2609 mutex_destroy(&tgt->aqlock); 2610 return (B_FALSE); 2611 } 2612 2613 return (B_TRUE); 2614 } 2615 2616 /* 2617 * Called with softstate lock held 2618 */ 2619 void 2620 pmcs_remove_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 2621 { 2622 pmcs_xscsi_t *xp; 2623 unsigned int vtgt; 2624 2625 ASSERT(mutex_owned(&pwp->lock)); 2626 2627 for (vtgt = 0; vtgt < pwp->max_dev; vtgt++) { 2628 xp = pwp->targets[vtgt]; 2629 if (xp == NULL) { 2630 continue; 2631 } 2632 2633 mutex_enter(&xp->statlock); 2634 if (xp->phy == pptr) { 2635 if (xp->new) { 2636 xp->new = 0; 2637 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2638 "cancel config of vtgt %u", vtgt); 2639 } else { 2640 pmcs_clear_xp(pwp, xp); 2641 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2642 "Removed tgt 0x%p vtgt %u", 2643 (void *)xp, vtgt); 2644 } 2645 mutex_exit(&xp->statlock); 2646 break; 2647 } 2648 mutex_exit(&xp->statlock); 2649 } 2650 } 2651 2652 void 2653 pmcs_prt_impl(pmcs_hw_t *pwp, pmcs_prt_level_t level, 2654 pmcs_phy_t 
*phyp, pmcs_xscsi_t *target, const char *fmt, ...) 2655 { 2656 va_list ap; 2657 int written = 0; 2658 char *ptr; 2659 uint32_t elem_size = PMCS_TBUF_ELEM_SIZE - 1; 2660 boolean_t system_log; 2661 int system_log_level; 2662 2663 switch (level) { 2664 case PMCS_PRT_DEBUG_DEVEL: 2665 case PMCS_PRT_DEBUG_DEV_STATE: 2666 case PMCS_PRT_DEBUG_PHY_LOCKING: 2667 case PMCS_PRT_DEBUG_SCSI_STATUS: 2668 case PMCS_PRT_DEBUG_UNDERFLOW: 2669 case PMCS_PRT_DEBUG_CONFIG: 2670 case PMCS_PRT_DEBUG_IPORT: 2671 case PMCS_PRT_DEBUG_MAP: 2672 case PMCS_PRT_DEBUG3: 2673 case PMCS_PRT_DEBUG2: 2674 case PMCS_PRT_DEBUG1: 2675 case PMCS_PRT_DEBUG: 2676 system_log = B_FALSE; 2677 break; 2678 case PMCS_PRT_INFO: 2679 system_log = B_TRUE; 2680 system_log_level = CE_CONT; 2681 break; 2682 case PMCS_PRT_WARN: 2683 system_log = B_TRUE; 2684 system_log_level = CE_NOTE; 2685 break; 2686 case PMCS_PRT_ERR: 2687 system_log = B_TRUE; 2688 system_log_level = CE_WARN; 2689 break; 2690 default: 2691 return; 2692 } 2693 2694 mutex_enter(&pmcs_trace_lock); 2695 gethrestime(&pmcs_tbuf_ptr->timestamp); 2696 ptr = pmcs_tbuf_ptr->buf; 2697 2698 /* 2699 * Store the pertinent PHY and target information if there is any 2700 */ 2701 if (target == NULL) { 2702 pmcs_tbuf_ptr->target_num = PMCS_INVALID_TARGET_NUM; 2703 pmcs_tbuf_ptr->target_ua[0] = '\0'; 2704 } else { 2705 pmcs_tbuf_ptr->target_num = target->target_num; 2706 (void) strncpy(pmcs_tbuf_ptr->target_ua, target->ua, 2707 PMCS_TBUF_UA_MAX_SIZE); 2708 } 2709 2710 if (phyp == NULL) { 2711 (void) memset(pmcs_tbuf_ptr->phy_sas_address, 0, 8); 2712 pmcs_tbuf_ptr->phy_path[0] = '\0'; 2713 pmcs_tbuf_ptr->phy_dtype = NOTHING; 2714 } else { 2715 (void) memcpy(pmcs_tbuf_ptr->phy_sas_address, 2716 phyp->sas_address, 8); 2717 (void) strncpy(pmcs_tbuf_ptr->phy_path, phyp->path, 32); 2718 pmcs_tbuf_ptr->phy_dtype = phyp->dtype; 2719 } 2720 2721 written += snprintf(ptr, elem_size, "pmcs%d:%d: ", 2722 ddi_get_instance(pwp->dip), level); 2723 ptr += strlen(ptr); 2724 
va_start(ap, fmt); 2725 written += vsnprintf(ptr, elem_size - written, fmt, ap); 2726 va_end(ap); 2727 if (written > elem_size - 1) { 2728 /* Indicate truncation */ 2729 pmcs_tbuf_ptr->buf[elem_size - 1] = '+'; 2730 } 2731 if (++pmcs_tbuf_idx == pmcs_tbuf_num_elems) { 2732 pmcs_tbuf_ptr = pmcs_tbuf; 2733 pmcs_tbuf_wrap = B_TRUE; 2734 pmcs_tbuf_idx = 0; 2735 } else { 2736 ++pmcs_tbuf_ptr; 2737 } 2738 mutex_exit(&pmcs_trace_lock); 2739 2740 /* 2741 * When pmcs_force_syslog in non-zero, everything goes also 2742 * to syslog, at CE_CONT level. 2743 */ 2744 if (pmcs_force_syslog) { 2745 system_log = B_TRUE; 2746 system_log_level = CE_CONT; 2747 } 2748 2749 /* 2750 * Anything that comes in with PMCS_PRT_INFO, WARN, or ERR also 2751 * goes to syslog. 2752 */ 2753 if (system_log) { 2754 char local[196]; 2755 2756 switch (system_log_level) { 2757 case CE_CONT: 2758 (void) snprintf(local, sizeof (local), "%sINFO: ", 2759 pmcs_console ? "" : "?"); 2760 break; 2761 case CE_NOTE: 2762 case CE_WARN: 2763 local[0] = 0; 2764 break; 2765 default: 2766 return; 2767 } 2768 2769 ptr = local; 2770 ptr += strlen(local); 2771 (void) snprintf(ptr, (sizeof (local)) - 2772 ((size_t)ptr - (size_t)local), "pmcs%d: ", 2773 ddi_get_instance(pwp->dip)); 2774 ptr += strlen(ptr); 2775 va_start(ap, fmt); 2776 (void) vsnprintf(ptr, 2777 (sizeof (local)) - ((size_t)ptr - (size_t)local), fmt, ap); 2778 va_end(ap); 2779 if (level == CE_CONT) { 2780 (void) strlcat(local, "\n", sizeof (local)); 2781 } 2782 cmn_err(system_log_level, local); 2783 } 2784 2785 } 2786 2787 /* 2788 * pmcs_acquire_scratch 2789 * 2790 * If "wait" is true, the caller will wait until it can acquire the scratch. 2791 * This implies the caller needs to be in a context where spinning for an 2792 * indeterminate amount of time is acceptable. 
 */
int
pmcs_acquire_scratch(pmcs_hw_t *pwp, boolean_t wait)
{
	int rval;

	/*
	 * Try-lock via atomic swap: a return of 0 means we took the
	 * scratch area; non-zero means it was already held.
	 */
	if (!wait) {
		return (atomic_swap_8(&pwp->scratch_locked, 1));
	}

	/*
	 * Caller will wait for scratch.
	 */
	while ((rval = atomic_swap_8(&pwp->scratch_locked, 1)) != 0) {
		drv_usecwait(100);
	}

	return (rval);
}

/*
 * Release the scratch area acquired via pmcs_acquire_scratch().
 */
void
pmcs_release_scratch(pmcs_hw_t *pwp)
{
	pwp->scratch_locked = 0;
}

/*
 * Create and install a named kstat of SAS PHY statistics for every PHY
 * on the iport that does not already have one.  Called with neither the
 * iport lock nor any PHY lock held; both are taken here.
 */
static void
pmcs_create_phy_stats(pmcs_iport_t *iport)
{
	sas_phy_stats_t	*ps;
	pmcs_hw_t	*pwp;
	pmcs_phy_t	*phyp;
	int		ndata;
	char		ks_name[KSTAT_STRLEN];

	ASSERT(iport != NULL);
	pwp = iport->pwp;
	ASSERT(pwp != NULL);

	mutex_enter(&iport->lock);

	for (phyp = list_head(&iport->phys);
	    phyp != NULL;
	    phyp = list_next(&iport->phys, phyp)) {

		pmcs_lock_phy(phyp);

		if (phyp->phy_stats != NULL) {
			pmcs_unlock_phy(phyp);
			/* We've already created this kstat instance */
			continue;
		}

		ndata = (sizeof (sas_phy_stats_t)/sizeof (kstat_named_t));

		/* Name: <driver>.<hba wwn>.<iport instance>.<phynum> */
		(void) snprintf(ks_name, sizeof (ks_name),
		    "%s.%llx.%d.%d", ddi_driver_name(iport->dip),
		    (longlong_t)pwp->sas_wwns[0],
		    ddi_get_instance(iport->dip), phyp->phynum);

		phyp->phy_stats = kstat_create("pmcs",
		    ddi_get_instance(iport->dip), ks_name, KSTAT_SAS_PHY_CLASS,
		    KSTAT_TYPE_NAMED, ndata, 0);

		if (phyp->phy_stats == NULL) {
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
			    "%s: Failed to create %s kstats", __func__,
			    ks_name);
			continue;
		}

		ps = (sas_phy_stats_t *)phyp->phy_stats->ks_data;

		kstat_named_init(&ps->seconds_since_last_reset,
		    "SecondsSinceLastReset", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->tx_frames,
		    "TxFrames", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->rx_frames,
		    "RxFrames", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->tx_words,
		    "TxWords", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->rx_words,
		    "RxWords", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->invalid_dword_count,
		    "InvalidDwordCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->running_disparity_error_count,
		    "RunningDisparityErrorCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->loss_of_dword_sync_count,
		    "LossofDwordSyncCount", KSTAT_DATA_ULONGLONG);
		kstat_named_init(&ps->phy_reset_problem_count,
		    "PhyResetProblemCount", KSTAT_DATA_ULONGLONG);

		phyp->phy_stats->ks_private = phyp;
		phyp->phy_stats->ks_update = pmcs_update_phy_stats;
		kstat_install(phyp->phy_stats);
		pmcs_unlock_phy(phyp);
	}

	mutex_exit(&iport->lock);
}

/*
 * kstat ks_update callback: refresh the PHY error counters from the
 * chip's diagnostic registers.  Returns DDI_SUCCESS, or DDI_FAILURE if
 * any counter could not be read (leaving earlier counters updated).
 */
int
pmcs_update_phy_stats(kstat_t *ks, int rw)
{
	int		val, ret = DDI_FAILURE;
	pmcs_phy_t	*pptr = (pmcs_phy_t *)ks->ks_private;
	pmcs_hw_t	*pwp = pptr->pwp;
	sas_phy_stats_t	*ps = ks->ks_data;

	_NOTE(ARGUNUSED(rw));
	ASSERT((pptr != NULL) && (pwp != NULL));

	/*
	 * We just want to lock against other invocations of kstat;
	 * we don't need to pmcs_lock_phy() for this.
	 */
	mutex_enter(&pptr->phy_lock);

	/* Get Stats from Chip */
	val = pmcs_get_diag_report(pwp, PMCS_INVALID_DWORD_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->invalid_dword_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_DISPARITY_ERR_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->running_disparity_error_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_LOST_DWORD_SYNC_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->loss_of_dword_sync_count.value.ull = (unsigned long long)val;

	val = pmcs_get_diag_report(pwp, PMCS_RESET_FAILED_CNT, pptr->phynum);
	if (val == DDI_FAILURE)
		goto fail;
	ps->phy_reset_problem_count.value.ull = (unsigned long long)val;

	ret = DDI_SUCCESS;
fail:
	mutex_exit(&pptr->phy_lock);
	return (ret);
}

/*
 * Delete the PHY statistics kstat, if any, attached to the iport's
 * primary PHY.  Takes the iport lock and the PHY lock.
 */
static void
pmcs_destroy_phy_stats(pmcs_iport_t *iport)
{
	pmcs_phy_t	*phyp;

	ASSERT(iport != NULL);
	mutex_enter(&iport->lock);
	phyp = iport->pptr;
	if (phyp == NULL) {
		mutex_exit(&iport->lock);
		return;
	}

	pmcs_lock_phy(phyp);
	if (phyp->phy_stats != NULL) {
		kstat_delete(phyp->phy_stats);
		phyp->phy_stats = NULL;
	}
	pmcs_unlock_phy(phyp);

	mutex_exit(&iport->lock);
}

/*
 * FMA error callback registered via ddi_fm_handler_register().
 */
/*ARGSUSED*/
static int
pmcs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

/*
 * Initialize FMA support: adjust access/DMA attributes for error
 * detection, register capabilities with the I/O Fault Services
 * framework, and set up pci ereports and the error callback as
 * capabilities permit.  No-op if pwp->fm_capabilities is zero.
 */
static void
pmcs_fm_init(pmcs_hw_t *pwp)
{
	ddi_iblock_cookie_t	fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (pwp->fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		pwp->reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		pwp->iqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->oqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->cip_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		pwp->fwlog_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 */
		ddi_fm_init(pwp->dip, &pwp->fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			pci_ereport_setup(pwp->dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			ddi_fm_handler_register(pwp->dip,
			    pmcs_fm_error_cb, (void *) pwp);
		}
	}
}

/*
 * Undo everything pmcs_fm_init() did, in reverse order, and restore the
 * default (non-FMA) access/DMA attributes.
 */
static void
pmcs_fm_fini(pmcs_hw_t *pwp)
{
	/* Only unregister FMA capabilities if registered */
	if (pwp->fm_capabilities) {
		/*
		 * Un-register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			ddi_fm_handler_unregister(pwp->dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
			pci_ereport_teardown(pwp->dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(pwp->dip);

		/* Adjust access and dma attributes for FMA */
		pwp->reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		pwp->iqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		pwp->oqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		pwp->cip_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		pwp->fwlog_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}
}

/*
 * Fabricate per-port SAS WWNs for an HBA with no programmed WWN, seeded
 * from the system serial number (or hrtime if no serial is available).
 * The top nibble is forced to 5 (NAA IEEE Registered format) and the
 * driver instance number is folded in; port i gets base + i.
 */
static boolean_t
pmcs_fabricate_wwid(pmcs_hw_t *pwp)
{
	char *cp, c;
	uint64_t adr;
	int i;

	/*
	 * NOTE(review): `cp' is passed as ddi_strtoul's endptr and is
	 * overwritten by the call; the initial &c value is never used.
	 */
	cp = &c;
	(void) ddi_strtoul(hw_serial, &cp, 10, (unsigned long *)&adr);

	if (adr == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: No serial number available to fabricate WWN",
		    __func__);

		adr = (uint64_t)gethrtime();
	}

	adr <<= 8;
	adr |= ((uint64_t)ddi_get_instance(pwp->dip) << 52);
	adr |= (5ULL << 60);

	for (i = 0; i < PMCS_MAX_PORTS; i++) {
		pwp->sas_wwns[i] = adr + i;
	}

	return (B_TRUE);
}