/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * SCSI (SCSA) midlayer interface for PMC driver.
 */

#include <sys/scsi/adapters/pmcs/pmcs.h>

/* Framework helper: convert a 64-bit LUN value to SCSI standard form. */
extern scsi_lun_t scsi_lun64_to_lun(scsi_lun64_t lun64);

/* SCSA (scsi_hba_tran) entry points */
static int pmcs_scsa_tran_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void pmcs_scsa_tran_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int pmcs_scsa_start(struct scsi_address *, struct scsi_pkt *);
static int pmcs_scsa_abort(struct scsi_address *, struct scsi_pkt *);
static int pmcs_scsa_reset(struct scsi_address *, int);
static int pmcs_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int pmcs_scsa_getcap(struct scsi_address *, char *, int);
static int pmcs_scsa_setcap(struct scsi_address *, char *, int, int);
static int pmcs_scsa_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t);
static void pmcs_scsa_teardown_pkt(struct scsi_pkt *);

/* SMP (smp_hba_tran) entry points */
static int pmcs_smp_init(dev_info_t *, dev_info_t *, smp_hba_tran_t *,
    smp_device_t *);
static void pmcs_smp_free(dev_info_t *, dev_info_t *, smp_hba_tran_t *,
    smp_device_t *);
static int pmcs_smp_start(struct smp_pkt *);

static int pmcs_scsi_quiesce(dev_info_t *);
static int pmcs_scsi_unquiesce(dev_info_t *);

/* Internal helpers */
static int pmcs_cap(struct scsi_address *, char *, int, int, int);
static pmcs_xscsi_t *
    pmcs_addr2xp(struct scsi_address *, uint64_t *, pmcs_cmd_t *);
static int pmcs_SAS_run(pmcs_cmd_t *, pmcwork_t *);
static void pmcs_SAS_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);

static int pmcs_SATA_run(pmcs_cmd_t *, pmcwork_t *);
static void pmcs_SATA_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);
static uint8_t pmcs_SATA_rwparm(uint8_t *, uint32_t *, uint64_t *, uint64_t);

static void pmcs_ioerror(pmcs_hw_t *, pmcs_dtype_t pmcs_dtype,
    pmcwork_t *, uint32_t *, uint32_t);


/*
 * Register this HBA instance with the SCSA and SMP frameworks.
 *
 * pwp is this instance's softstate; ap is the baseline DMA attribute
 * template, which is copied and adjusted (SGL length, relaxed ordering,
 * DDI_DMA_FLAGERR) before being handed to scsi_hba_attach_setup().
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE with all partially-created
 * transport state torn down again.
 */
int
pmcs_scsa_init(pmcs_hw_t *pwp, const ddi_dma_attr_t *ap)
{
	scsi_hba_tran_t *tran;
	ddi_dma_attr_t pmcs_scsa_dattr;
	int flags;

	/* Local copy of the DMA attributes so we can tweak them. */
	(void) memcpy(&pmcs_scsa_dattr, ap, sizeof (ddi_dma_attr_t));
	pmcs_scsa_dattr.dma_attr_sgllen =
	    ((PMCS_SGL_NCHUNKS - 1) * (PMCS_MAX_CHUNKS - 1)) + PMCS_SGL_NCHUNKS;
	pmcs_scsa_dattr.dma_attr_flags = DDI_DMA_RELAXED_ORDERING;
	pmcs_scsa_dattr.dma_attr_flags |= DDI_DMA_FLAGERR;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(pwp->dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "scsi_hba_tran_alloc failed");
		return (DDI_FAILURE);
	}

	/* Wire up the SCSA entry point vector. */
	tran->tran_hba_private = pwp;
	tran->tran_tgt_init = pmcs_scsa_tran_tgt_init;
	tran->tran_tgt_free = pmcs_scsa_tran_tgt_free;
	tran->tran_start = pmcs_scsa_start;
	tran->tran_abort = pmcs_scsa_abort;
	tran->tran_reset = pmcs_scsa_reset;
	tran->tran_reset_notify = pmcs_scsi_reset_notify;
	tran->tran_getcap = pmcs_scsa_getcap;
	tran->tran_setcap = pmcs_scsa_setcap;
	tran->tran_setup_pkt = pmcs_scsa_setup_pkt;
	tran->tran_teardown_pkt = pmcs_scsa_teardown_pkt;
	tran->tran_quiesce = pmcs_scsi_quiesce;
	tran->tran_unquiesce = pmcs_scsi_unquiesce;
	tran->tran_interconnect_type = INTERCONNECT_SAS;
	tran->tran_hba_len = sizeof (pmcs_cmd_t);

	/*
	 * Attach this instance of the hba
	 */

	flags = SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CDB | SCSI_HBA_ADDR_COMPLEX |
	    SCSI_HBA_TRAN_PHCI | SCSI_HBA_HBA;

	if (scsi_hba_attach_setup(pwp->dip, &pmcs_scsa_dattr, tran, flags)) {
		scsi_hba_tran_free(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "scsi_hba_attach failed");
		return (DDI_FAILURE);
	}
	pwp->tran = tran;

	/*
	 * Attach the SMP part of this hba
	 */
	pwp->smp_tran = smp_hba_tran_alloc(pwp->dip);
	ASSERT(pwp->smp_tran != NULL);
	pwp->smp_tran->smp_tran_hba_private = pwp;
	pwp->smp_tran->smp_tran_init = pmcs_smp_init;
	pwp->smp_tran->smp_tran_free = pmcs_smp_free;
	pwp->smp_tran->smp_tran_start = pmcs_smp_start;

	if (smp_hba_attach_setup(pwp->dip, pwp->smp_tran) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "smp_hba_attach failed");
		/* Undo both the SMP and the SCSA allocations. */
		smp_hba_tran_free(pwp->smp_tran);
		pwp->smp_tran = NULL;
		scsi_hba_tran_free(tran);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * SCSA entry points
 */

/*
 * tran_tgt_init(9E): set up per-LUN soft state for a new scsi_device.
 * Devices are only enumerated on iport nodes, never on the HBA node.
 * On success the LUN soft state is linked to its target (pmcs_xscsi_t)
 * and the target's ref_count is bumped; on any failure everything set
 * up so far is unwound at the tgt_init_fail label.
 */
static int
pmcs_scsa_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	pmcs_hw_t *pwp = NULL;
	int rval;
	char *variant_prop = "sata";
	char *tgt_port = NULL, *ua = NULL;
	pmcs_xscsi_t *tgt = NULL;
	pmcs_iport_t *iport;
	pmcs_lun_t *lun = NULL;
	pmcs_phy_t *phyp = NULL;
	uint64_t lun_num;
	boolean_t got_scratch = B_FALSE;

	/*
	 * First, make sure we're an iport and get the pointer to the HBA
	 * node's softstate
	 */
	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pmcs_prt(TRAN2PMC(tran), PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		goto tgt_init_fail;
	}

	pwp = ITRAN2PMC(tran);
	iport = ITRAN2IPORT(tran);

	/*
	 * Get the unit-address
	 */
	ua = scsi_device_unit_address(sd);
	if (ua == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Couldn't get UA", __func__);
		/* pwp->lock isn't held yet; NULL pwp skips the unlock below */
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "got ua '%s'", ua);

	/*
	 * Get the target address
	 */
	rval = scsi_device_prop_lookup_string(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port);
	if (rval != DDI_PROP_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "Couldn't get target UA");
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "got tgt_port '%s'", tgt_port);

	/*
	 * Validate that this tran_tgt_init is for an active iport.
	 */
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Got tran_tgt_init on inactive iport for '%s'",
		    __func__, tgt_port);
		pwp = NULL;
		goto tgt_init_fail;
	}

	/*
	 * Since we're going to wait for scratch, be sure to acquire it while
	 * we're not holding any other locks
	 */
	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	got_scratch = B_TRUE;

	mutex_enter(&pwp->lock);

	/*
	 * See if there's already a target softstate. If not, allocate one.
	 */
	tgt = pmcs_get_target(iport, tgt_port, B_TRUE);

	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: "
		    "No tgt for tgt_port (%s)", __func__, tgt_port);
		goto tgt_init_fail;
	}

	/*
	 * Take a hold on the target's PHY.  Root PHYs are not refcounted.
	 * NOTE(review): pmcs_get_target() appears to return with the PHY
	 * locked — the ASSERT below relies on that; confirm against its
	 * definition.
	 */
	phyp = tgt->phy;
	if (!IS_ROOT_PHY(phyp)) {
		pmcs_inc_phy_ref_count(phyp);
	}
	ASSERT(mutex_owned(&phyp->phy_lock));

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt, "@%s tgt = 0x%p, dip = 0x%p",
	    ua, (void *)tgt, (void *)tgt_dip);

	/* Now get the lun */
	lun_num = scsi_device_prop_get_int64(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_LUN64, SCSI_LUN64_ILLEGAL);
	if (lun_num == SCSI_LUN64_ILLEGAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "No LUN for tgt %p", (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, "%s: @%s tgt 0x%p phy "
	    "0x%p (%s)", __func__, ua, (void *)tgt, (void *)phyp, phyp->path);

	mutex_enter(&tgt->statlock);
	tgt->dtype = phyp->dtype;
	if (tgt->dtype != SAS && tgt->dtype != SATA) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "PHY 0x%p went away?", (void *)phyp);
		goto tgt_init_fail;
	}

	/* We don't support SATA devices at LUN > 0. */
	if ((tgt->dtype == SATA) && (lun_num > 0)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: No support for SATA devices at LUN > 0 "
		    "(target = 0x%p)", __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	/*
	 * Allocate LU soft state. We use ddi_soft_state_bystr_zalloc instead
	 * of kmem_alloc because ddi_soft_state_bystr_zalloc allows us to
	 * verify that the framework never tries to initialize two scsi_device
	 * structures with the same unit-address at the same time.
	 */
	if (ddi_soft_state_bystr_zalloc(tgt->lun_sstate, ua) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt,
		    "Couldn't allocate LU soft state");
		goto tgt_init_fail;
	}

	lun = ddi_soft_state_bystr_get(tgt->lun_sstate, ua);
	if (lun == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt,
		    "Couldn't get LU soft state");
		goto tgt_init_fail;
	}
	scsi_device_hba_private_set(sd, lun);
	lun->lun_num = lun_num;

	/* convert the scsi_lun64_t value to SCSI standard form */
	lun->scsi_lun = scsi_lun64_to_lun(lun_num);

	ASSERT(strlen(ua) < (PMCS_MAX_UA_SIZE - 1));
	bcopy(ua, lun->unit_address, strnlen(ua, PMCS_MAX_UA_SIZE - 1));

	lun->target = tgt;

	/*
	 * If this is the first tran_tgt_init, add this target to our list
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		/* Find the first free slot in the target table. */
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
			    "Target list full.");
			goto tgt_init_fail;
		}
	}

	tgt->dip = sd->sd_dev;
	lun->sd = sd;
	list_insert_tail(&tgt->lun_list, lun);

	if (!pmcs_assign_device(pwp, tgt)) {
		/* Back out the target-table slot claimed above. */
		pmcs_release_scratch(pwp);
		pwp->targets[tgt->target_num] = NULL;
		tgt->target_num = PMCS_INVALID_TARGET_NUM;
		tgt->phy = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_release_scratch(pwp);
	tgt->ref_count++;

	(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET, (uint32_t)(tgt->target_num));

	/* SM-HBA */
	if (tgt->dtype == SATA) {
		/* TCR in PSARC/1997/281 opinion */
		(void) scsi_device_prop_update_string(sd,
		    SCSI_DEVICE_PROP_PATH, "variant", variant_prop);
	}

	tgt->phy_addressable = PMCS_PHY_ADDRESSABLE(phyp);

	if (tgt->phy_addressable) {
		(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
		    SCSI_ADDR_PROP_SATA_PHY, phyp->phynum);
	}

	/* SM-HBA */
	(void) pmcs_smhba_set_scsi_device_props(pwp, phyp, sd);
	/*
	 * Make sure attached port and target port pm props are updated
	 * By passing in 0s, we're not actually updating any values, but
	 * the properties should now get updated on the node.
	 */

	mutex_exit(&tgt->statlock);
	pmcs_update_phy_pm_props(phyp, 0, 0, B_TRUE);
	pmcs_unlock_phy(phyp);
	mutex_exit(&pwp->lock);
	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	return (DDI_SUCCESS);

tgt_init_fail:
	/*
	 * Unwind in reverse order of setup.  Which pieces need undoing is
	 * tracked by the local pointers/flags (got_scratch, lun, phyp, tgt,
	 * pwp) — each is non-NULL/set only if the corresponding resource
	 * was actually acquired above.
	 */
	scsi_device_hba_private_set(sd, NULL);
	if (got_scratch) {
		pmcs_release_scratch(pwp);
	}
	if (lun) {
		list_remove(&tgt->lun_list, lun);
		ddi_soft_state_bystr_free(tgt->lun_sstate, ua);
	}
	if (phyp) {
		mutex_exit(&tgt->statlock);
		pmcs_unlock_phy(phyp);
		/*
		 * phyp's ref count was incremented in pmcs_new_tport.
		 * We're failing configuration, we now need to decrement it.
		 */
		if (!IS_ROOT_PHY(phyp)) {
			pmcs_dec_phy_ref_count(phyp);
		}
		phyp->target = NULL;
	}
	if (tgt && tgt->ref_count == 0) {
		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
	}
	if (pwp) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: failed for @%s tgt 0x%p phy 0x%p", __func__, ua,
		    (void *)tgt, (void *)phyp);
	}
	if (tgt_port) {
		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	}
	return (DDI_FAILURE);
}

/*
 * tran_tgt_free(9E): release the per-LUN soft state created by
 * pmcs_scsa_tran_tgt_init() and drop a reference on the target.
 * When the last LUN reference goes away, the target is removed from
 * the target table and destroyed (unless device-state recovery is in
 * progress, in which case the free is refused for now).
 *
 * Lock order here: pwp->lock -> phy_lock -> statlock.
 */
static void
pmcs_scsa_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	_NOTE(ARGUNUSED(hba_dip, tgt_dip));
	pmcs_hw_t *pwp;
	pmcs_lun_t *lun;
	pmcs_xscsi_t *target;
	char *unit_address;
	pmcs_phy_t *phyp;

	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pwp = TRAN2PMC(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		return;
	}

	lun = (pmcs_lun_t *)scsi_device_hba_private_get(sd);

	ASSERT((lun != NULL) && (lun->target != NULL));
	ASSERT(lun->target->ref_count > 0);

	target = lun->target;
	unit_address = lun->unit_address;
	list_remove(&target->lun_list, lun);

	pwp = ITRAN2PMC(tran);
	mutex_enter(&pwp->lock);
	phyp = target->phy;
	if (phyp) {
		mutex_enter(&phyp->phy_lock);
	}
	mutex_enter(&target->statlock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target,
	    "%s: for @%s tgt 0x%p phy 0x%p", __func__, unit_address,
	    (void *)target, (void *)phyp);
	ddi_soft_state_bystr_free(lun->target->lun_sstate, unit_address);

	if (target->recover_wait) {
		mutex_exit(&target->statlock);
		if (phyp) {
			mutex_exit(&phyp->phy_lock);
		}
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target, "%s: "
		    "Target 0x%p in device state recovery, fail tran_tgt_free",
		    __func__, (void *)target);
		return;
	}

	/*
	 * If this target still has a PHY pointer and that PHY's target pointer
	 * has been cleared, then that PHY has been reaped. In that case, there
	 * would be no need to decrement the reference count
	 */
	if (phyp && !IS_ROOT_PHY(phyp) && phyp->target) {
		pmcs_dec_phy_ref_count(phyp);
	}

	if (--target->ref_count == 0) {
		/*
		 * Remove this target from our list. The target soft
		 * state will remain, and the device will remain registered
		 * with the hardware unless/until we're told the device
		 * physically went away.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target,
		    "%s: Free target 0x%p (vtgt %d)", __func__, (void *)target,
		    target->target_num);
		pwp->targets[target->target_num] = NULL;
		target->target_num = PMCS_INVALID_TARGET_NUM;
		/*
		 * If the target still has a PHY pointer, break the linkage
		 */
		if (phyp) {
			phyp->target = NULL;
		}
		target->phy = NULL;
		/*
		 * NOTE(review): pmcs_destroy_target() presumably drops
		 * statlock itself, since the else-branch drops it and no
		 * exit occurs here — confirm against its definition.
		 */
		pmcs_destroy_target(target);
	} else {
		mutex_exit(&target->statlock);
	}

	if (phyp) {
		mutex_exit(&phyp->phy_lock);
	}
	mutex_exit(&pwp->lock);
}

/*
 * tran_start(9E): accept a SCSI packet for transport.
 *
 * Polled (FLAG_NOINTR) packets are rejected outright.  Otherwise the
 * packet is queued on the target's wait queue; if the HBA is blocked
 * or the target is draining/resetting/recovering the packet stays
 * queued for a worker, otherwise the queue is run immediately.
 * Packets for dead/missing targets are completed with CMD_DEV_GONE.
 */
static int
pmcs_scsa_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_cmd_t *sp = PKT2CMD(pkt);
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_xscsi_t *xp;
	boolean_t blocked;
	uint32_t hba_state;

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: pkt %p sd %p cdb0=0x%02x dl=%lu", __func__, (void *)pkt,
	    (void *)scsi_address_device(&pkt->pkt_address),
	    pkt->pkt_cdbp[0] & 0xff, pkt->pkt_dma_len);

	if (pkt->pkt_flags & FLAG_NOINTR) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
		    "%s: nointr pkt", __func__);
		return (TRAN_BADPKT);
	}

	sp->cmd_tag = 0;
	pkt->pkt_state = pkt->pkt_statistics = 0;
	pkt->pkt_reason = CMD_INCOMPLETE;

	/* Snapshot HBA state under the lock, then act on the snapshot. */
	mutex_enter(&pwp->lock);
	hba_state = pwp->state;
	blocked = pwp->blocked;
	mutex_exit(&pwp->lock);

	if (hba_state != STATE_RUNNING) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (TRAN_FATAL_ERROR);
	}

	/* On success, pmcs_addr2xp returns with xp->statlock held. */
	xp = pmcs_addr2xp(ap, NULL, sp);
	if (xp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
		    "%s: dropping due to null target", __func__);
		goto dead_target;
	}
	ASSERT(mutex_owned(&xp->statlock));

	/*
	 * First, check to see if the device is gone.
	 */
	if (xp->dev_gone) {
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, xp,
		    "%s: dropping due to dead target 0x%p",
		    __func__, (void *)xp);
		goto dead_target;
	}

	/*
	 * If we're blocked (quiesced) just return.
	 */
	if (blocked) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba blocked", __func__);
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (TRAN_ACCEPT);
	}

	/*
	 * If we're draining or resetting, queue and return.
	 */
	if (xp->draining || xp->resetting || xp->recover_wait) {
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, xp,
		    "%s: draining/resetting/recovering (cnt %u)",
		    __func__, xp->actv_cnt);
		/*
		 * By the time we get here, draining or
		 * resetting may have come and gone, not
		 * yet noticing that we had put something
		 * on the wait queue, so schedule a worker
		 * to look at this later.
		 */
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (TRAN_ACCEPT);
	}

	xp->actv_pkts++;
	mutex_exit(&xp->statlock);

	/*
	 * Queue this command to the tail of the wait queue.
	 * This keeps us from getting commands out of order.
	 */
	mutex_enter(&xp->wqlock);
	STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
	mutex_exit(&xp->wqlock);

	/*
	 * Now run the queue for this device.
	 */
	(void) pmcs_scsa_wq_run_one(pwp, xp);

	return (TRAN_ACCEPT);

dead_target:
	/* Complete the packet immediately with CMD_DEV_GONE. */
	pkt->pkt_state = STATE_GOT_BUS;
	pkt->pkt_reason = CMD_DEV_GONE;
	mutex_enter(&pwp->cq_lock);
	STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
	PMCS_CQ_RUN_LOCKED(pwp);
	mutex_exit(&pwp->cq_lock);
	return (TRAN_ACCEPT);
}

/*
 * tran_abort(9E).  Return code 1 = Success, 0 = failure.
 *
 * pkt == NULL means "abort everything" for the LUN's PHY.  Otherwise,
 * if the command is active on the chip an ABORT TASK (SAS) or NCQ
 * abort (SATA) is issued; failing that, the command is searched for
 * on the target's wait queue and completed there as CMD_ABORTED.
 */
static int
pmcs_scsa_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_cmd_t *sp = NULL;
	pmcs_xscsi_t *xp = NULL;
	pmcs_phy_t *pptr = NULL;
	pmcs_lun_t *pmcs_lun = (pmcs_lun_t *)
	    scsi_device_hba_private_get(scsi_address_device(ap));
	uint32_t tag;
	uint64_t lun;
	pmcwork_t *pwrk;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	if (pkt == NULL) {
		/* ABORT_ALL case: abort everything outstanding on the PHY. */
		if (pmcs_lun == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: "
			    "No pmcs_lun_t struct to do ABORT_ALL", __func__);
			return (0);
		}
		xp = pmcs_lun->target;
		if (xp != NULL) {
			pptr = xp->phy;
		}
		if (pptr == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: pkt is "
			    "NULL. No tgt/phy to do ABORT_ALL", __func__);
			return (0);
		}
		pmcs_lock_phy(pptr);
		if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
			pptr->abort_pending = 1;
			SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		}
		pmcs_unlock_phy(pptr);
		return (1);
	}

	sp = PKT2CMD(pkt);
	xp = sp->cmd_target;

	if (sp->cmd_lun) {
		lun = sp->cmd_lun->lun_num;
	} else {
		lun = 0;
	}
	if (xp == NULL) {
		return (0);
	}

	/*
	 * See if we have a real work structure associated with this cmd.
	 */
	pwrk = pmcs_tag2wp(pwp, sp->cmd_tag, B_FALSE);
	if (pwrk && pwrk->arg == sp) {
		/* Command is active on the chip: issue a task abort. */
		tag = pwrk->htag;
		pptr = pwrk->phy;
		pwrk->timer = 0;	/* we don't time this here */
		ASSERT(pwrk->state == PMCS_WORK_STATE_ONCHIP);
		mutex_exit(&pwrk->lock);
		pmcs_lock_phy(pptr);
		if (pptr->dtype == SAS) {
			if (pmcs_ssp_tmf(pwp, pptr, SAS_ABORT_TASK, tag, lun,
			    NULL)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		} else {
			/*
			 * XXX: Was the command that was active an
			 * NCQ I/O command?
			 */
			pptr->need_rl_ext = 1;
			if (pmcs_sata_abort_ncq(pwp, pptr)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		}
		pptr->abort_pending = 1;
		pmcs_unlock_phy(pptr);
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		return (1);
	}
	if (pwrk) {
		mutex_exit(&pwrk->lock);
	}
	/*
	 * Okay, those weren't the droids we were looking for.
	 * See if the command is on any of the wait queues.
	 */
	mutex_enter(&xp->wqlock);
	sp = NULL;
	STAILQ_FOREACH(sp, &xp->wq, cmd_next) {
		if (sp == PKT2CMD(pkt)) {
			STAILQ_REMOVE(&xp->wq, sp, pmcs_cmd, cmd_next);
			break;
		}
	}
	mutex_exit(&xp->wqlock);
	if (sp) {
		/* Found it queued but not yet on-chip: complete as aborted. */
		pkt->pkt_reason = CMD_ABORTED;
		pkt->pkt_statistics |= STAT_ABORTED;
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
		return (1);
	}
	return (0);
}

/*
 * SCSA reset functions
 */

/*
 * tran_reset(9E).  RESET_LUN and RESET_TARGET are serviced via
 * pmcs_reset_dev(); RESET_ALL is not supported.  Returns 1 on
 * success, 0 on failure.
 */
static int
pmcs_scsa_reset(struct scsi_address *ap, int level)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint64_t lun = (uint64_t)-1, *lp = NULL;
	int rval;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	switch (level) {
	case RESET_ALL:
		/* Full bus reset is not supported; report failure. */
		rval = 0;
		break;
	case RESET_LUN:
		/*
		 * Point lp at lun so that pmcs_addr2xp
		 * will fill out the 64 bit lun number.
		 */
		lp = &lun;
		/* FALLTHROUGH */
	case RESET_TARGET:
		/* On success, pmcs_addr2xp returns with statlock held. */
		xp = pmcs_addr2xp(ap, lp, NULL);
		if (xp == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: no xp found for this scsi address", __func__);
			return (0);
		}

		if (xp->dev_gone) {
			mutex_exit(&xp->statlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
			    "%s: Target 0x%p has gone away", __func__,
			    (void *)xp);
			return (0);
		}

		/*
		 * If we're already performing this action, or if device
		 * state recovery is already running, just return failure.
		 */
		if (xp->resetting || xp->recover_wait) {
			mutex_exit(&xp->statlock);
			return (0);
		}
		xp->reset_wait = 0;
		xp->reset_success = 0;
		xp->resetting = 1;
		pptr = xp->phy;
		/* Drop statlock while the (blocking) reset runs. */
		mutex_exit(&xp->statlock);

		if (pmcs_reset_dev(pwp, pptr, lun)) {
			rval = 0;
		} else {
			rval = 1;
		}

		mutex_enter(&xp->statlock);
		if (rval == 1) {
			xp->reset_success = 1;
		}
		if (xp->reset_wait) {
			/* Wake up anyone waiting on this reset. */
			xp->reset_wait = 0;
			cv_signal(&xp->reset_cv);
		}
		xp->resetting = 0;
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		break;
	default:
		rval = 0;
		break;
	}

	return (rval);
}

/*
 * tran_reset_notify(9E): register/unregister a reset callback via the
 * common scsi_hba_reset_notify_setup() helper.
 */
static int
pmcs_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &pwp->lock, &pwp->reset_notify_listf));
}


/*
 * Common worker for tran_getcap/tran_setcap.  'set' selects set (1)
 * vs. get (0); all of our capabilities are effectively read-only, so
 * set requests fall through with rval 0 (or -1 for unknown caps).
 */
static int
pmcs_cap(struct scsi_address *ap, char *cap, int val, int tonly, int set)
{
	_NOTE(ARGUNUSED(val, tonly));
	int cidx, rval = 0;
	pmcs_xscsi_t *xp;

	cidx = scsi_hba_lookup_capstr(cap);
	if (cidx == -1) {
		return (-1);
	}

	/* On success, pmcs_addr2xp returns with xp->statlock held. */
	xp = pmcs_addr2xp(ap, NULL, NULL);
	if (xp == NULL) {
		return (-1);
	}

	switch (cidx) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_INITIATOR_ID:
		if (set == 0) {
			rval = INT_MAX;	/* argh */
		}
		break;
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
	case SCSI_CAP_UNTAGGED_QING:
		if (set == 0) {
			rval = 1;
		}
		break;

	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;

	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_QFULL_RETRIES:
	case SCSI_CAP_QFULL_RETRY_INTERVAL:
		break;
	case SCSI_CAP_SCSI_VERSION:
		if (set == 0) {
			rval = SCSI_VERSION_3;
		}
		break;
	case SCSI_CAP_INTERCONNECT_TYPE:
		if (set) {
			break;
		}
		/* Directly-addressable SATA devices report as SATA. */
		if (xp->phy_addressable) {
			rval = INTERCONNECT_SATA;
		} else {
			rval = INTERCONNECT_SAS;
		}
		break;
	case SCSI_CAP_CDB_LEN:
		if (set == 0) {
			rval = 16;
		}
		break;
	case SCSI_CAP_LUN_RESET:
		if (set) {
			break;
		}
		/* LUN reset is not supported for SATA devices. */
		if (xp->dtype == SATA) {
			rval = 0;
		} else {
			rval = 1;
		}
		break;
	default:
		rval = -1;
		break;
	}
	mutex_exit(&xp->statlock);
	pmcs_prt(ADDR2PMC(ap), PMCS_PRT_DEBUG3, NULL, NULL,
	    "%s: cap %s val %d set %d rval %d",
	    __func__, cap, val, set, rval);
	return (rval);
}

/*
 * Returns with statlock held if the xp is found.
 * Fills in pmcs_cmd_t with values if pmcs_cmd_t pointer non-NULL.
 */
static pmcs_xscsi_t *
pmcs_addr2xp(struct scsi_address *ap, uint64_t *lp, pmcs_cmd_t *sp)
{
	pmcs_xscsi_t *xp;
	pmcs_lun_t *lun = (pmcs_lun_t *)
	    scsi_device_hba_private_get(scsi_address_device(ap));

	if ((lun == NULL) || (lun->target == NULL)) {
		return (NULL);
	}
	xp = lun->target;
	mutex_enter(&xp->statlock);

	if (xp->dev_gone || (xp->phy == NULL)) {
		/*
		 * This may be a retried packet, so it's possible cmd_target
		 * and cmd_lun may still be populated. Clear them.
948 */ 949 if (sp != NULL) { 950 sp->cmd_target = NULL; 951 sp->cmd_lun = NULL; 952 } 953 mutex_exit(&xp->statlock); 954 return (NULL); 955 } 956 957 if (sp != NULL) { 958 sp->cmd_target = xp; 959 sp->cmd_lun = lun; 960 } 961 if (lp) { 962 *lp = lun->lun_num; 963 } 964 return (xp); 965 } 966 967 static int 968 pmcs_scsa_getcap(struct scsi_address *ap, char *cap, int whom) 969 { 970 int r; 971 if (cap == NULL) { 972 return (-1); 973 } 974 r = pmcs_cap(ap, cap, 0, whom, 0); 975 return (r); 976 } 977 978 static int 979 pmcs_scsa_setcap(struct scsi_address *ap, char *cap, int value, int whom) 980 { 981 int r; 982 if (cap == NULL) { 983 return (-1); 984 } 985 r = pmcs_cap(ap, cap, value, whom, 1); 986 return (r); 987 } 988 989 static int 990 pmcs_scsa_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t), 991 caddr_t cbarg) 992 { 993 _NOTE(ARGUNUSED(callback, cbarg)); 994 pmcs_cmd_t *sp = pkt->pkt_ha_private; 995 996 bzero(sp, sizeof (pmcs_cmd_t)); 997 sp->cmd_pkt = pkt; 998 return (0); 999 } 1000 1001 static void 1002 pmcs_scsa_teardown_pkt(struct scsi_pkt *pkt) 1003 { 1004 pmcs_cmd_t *sp = pkt->pkt_ha_private; 1005 sp->cmd_target = NULL; 1006 sp->cmd_lun = NULL; 1007 } 1008 1009 static int 1010 pmcs_smp_start(struct smp_pkt *smp_pkt) 1011 { 1012 struct pmcwork *pwrk; 1013 pmcs_iport_t *iport; 1014 const uint_t rdoff = SAS_SMP_MAX_PAYLOAD; 1015 uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status; 1016 uint64_t wwn; 1017 pmcs_hw_t *pwp; 1018 pmcs_phy_t *pptr; 1019 pmcs_xscsi_t *xp; 1020 uint_t reqsz, rspsz, will_retry; 1021 int result; 1022 1023 pwp = smp_pkt->smp_pkt_address->smp_a_hba_tran->smp_tran_hba_private; 1024 bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE); 1025 1026 pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL, 1027 "%s: starting for wwn 0x%" PRIx64, __func__, wwn); 1028 1029 will_retry = smp_pkt->smp_pkt_will_retry; 1030 1031 (void) pmcs_acquire_scratch(pwp, B_TRUE); 1032 reqsz = smp_pkt->smp_pkt_reqsize; 1033 if (reqsz > SAS_SMP_MAX_PAYLOAD) 
{ 1034 reqsz = SAS_SMP_MAX_PAYLOAD; 1035 } 1036 (void) memcpy(pwp->scratch, smp_pkt->smp_pkt_req, reqsz); 1037 1038 rspsz = smp_pkt->smp_pkt_rspsize; 1039 if (rspsz > SAS_SMP_MAX_PAYLOAD) { 1040 rspsz = SAS_SMP_MAX_PAYLOAD; 1041 } 1042 1043 /* 1044 * The request size from the SMP driver always includes 4 bytes 1045 * for the CRC. The PMCS chip, however, doesn't want to see those 1046 * counts as part of the transfer size. 1047 */ 1048 reqsz -= 4; 1049 1050 pptr = pmcs_find_phy_by_wwn(pwp, wwn); 1051 /* PHY is now locked */ 1052 if (pptr == NULL || pptr->dtype != EXPANDER) { 1053 if (pptr) { 1054 pmcs_unlock_phy(pptr); 1055 } 1056 pmcs_release_scratch(pwp); 1057 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 1058 "%s: could not find phy", __func__); 1059 smp_pkt->smp_pkt_reason = ENXIO; 1060 return (DDI_FAILURE); 1061 } 1062 1063 if ((pptr->iport == NULL) || !pptr->valid_device_id) { 1064 pmcs_unlock_phy(pptr); 1065 pmcs_release_scratch(pwp); 1066 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target, 1067 "%s: Can't reach PHY %s", __func__, pptr->path); 1068 smp_pkt->smp_pkt_reason = ENXIO; 1069 return (DDI_FAILURE); 1070 } 1071 1072 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 1073 if (pwrk == NULL) { 1074 pmcs_unlock_phy(pptr); 1075 pmcs_release_scratch(pwp); 1076 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 1077 "%s: could not get work structure", __func__); 1078 smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EBUSY; 1079 return (DDI_FAILURE); 1080 } 1081 1082 pwrk->arg = msg; 1083 pwrk->dtype = EXPANDER; 1084 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 1085 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 1086 if (ptr == NULL) { 1087 pmcs_pwork(pwp, pwrk); 1088 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 1089 pmcs_unlock_phy(pptr); 1090 pmcs_release_scratch(pwp); 1091 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1092 "%s: could not get IQ entry", __func__); 1093 smp_pkt->smp_pkt_reason = will_retry ? 
EAGAIN :EBUSY; 1094 return (DDI_FAILURE); 1095 } 1096 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST)); 1097 msg[1] = LE_32(pwrk->htag); 1098 msg[2] = LE_32(pptr->device_id); 1099 msg[3] = LE_32(SMP_INDIRECT_RESPONSE | SMP_INDIRECT_REQUEST); 1100 msg[8] = LE_32(DWORD0(pwp->scratch_dma)); 1101 msg[9] = LE_32(DWORD1(pwp->scratch_dma)); 1102 msg[10] = LE_32(reqsz); 1103 msg[11] = 0; 1104 msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff)); 1105 msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff)); 1106 msg[14] = LE_32(rspsz); 1107 msg[15] = 0; 1108 1109 COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE); 1110 1111 pmcs_hold_iport(pptr->iport); 1112 iport = pptr->iport; 1113 pmcs_smp_acquire(iport); 1114 pwrk->state = PMCS_WORK_STATE_ONCHIP; 1115 htag = pwrk->htag; 1116 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 1117 pmcs_unlock_phy(pptr); 1118 WAIT_FOR(pwrk, smp_pkt->smp_pkt_timeout * 1000, result); 1119 pmcs_pwork(pwp, pwrk); 1120 pmcs_smp_release(iport); 1121 pmcs_rele_iport(iport); 1122 pmcs_lock_phy(pptr); 1123 1124 if (result) { 1125 pmcs_timed_out(pwp, htag, __func__); 1126 if (pmcs_abort(pwp, pptr, htag, 0, 0)) { 1127 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target, 1128 "%s: Unable to issue SMP ABORT for htag 0x%08x", 1129 __func__, htag); 1130 } else { 1131 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target, 1132 "%s: Issuing SMP ABORT for htag 0x%08x", 1133 __func__, htag); 1134 } 1135 pmcs_unlock_phy(pptr); 1136 pmcs_release_scratch(pwp); 1137 smp_pkt->smp_pkt_reason = ETIMEDOUT; 1138 return (DDI_FAILURE); 1139 } 1140 status = LE_32(msg[2]); 1141 if (status == PMCOUT_STATUS_OVERFLOW) { 1142 status = PMCOUT_STATUS_OK; 1143 smp_pkt->smp_pkt_reason = EOVERFLOW; 1144 } 1145 if (status != PMCOUT_STATUS_OK) { 1146 const char *emsg = pmcs_status_str(status); 1147 if (emsg == NULL) { 1148 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target, 1149 "SMP operation failed (0x%x)", status); 1150 } else { 1151 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target, 
1152 "SMP operation failed (%s)", emsg); 1153 } 1154 1155 if ((status == PMCOUT_STATUS_ERROR_HW_TIMEOUT) || 1156 (status == PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT)) { 1157 smp_pkt->smp_pkt_reason = 1158 will_retry ? EAGAIN : ETIMEDOUT; 1159 result = DDI_FAILURE; 1160 } else if (status == 1161 PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS) { 1162 xp = pptr->target; 1163 if (xp == NULL) { 1164 smp_pkt->smp_pkt_reason = EIO; 1165 result = DDI_FAILURE; 1166 goto out; 1167 } 1168 if (xp->dev_state != 1169 PMCS_DEVICE_STATE_NON_OPERATIONAL) { 1170 xp->dev_state = 1171 PMCS_DEVICE_STATE_NON_OPERATIONAL; 1172 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, xp->phy, 1173 xp, "%s: Got _IT_NEXUS_LOSS SMP status. " 1174 "Tgt(0x%p) dev_state set to " 1175 "_NON_OPERATIONAL", __func__, 1176 (void *)xp); 1177 } 1178 /* ABORT any pending commands related to this device */ 1179 if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) != 0) { 1180 pptr->abort_pending = 1; 1181 smp_pkt->smp_pkt_reason = EIO; 1182 result = DDI_FAILURE; 1183 } 1184 } else { 1185 smp_pkt->smp_pkt_reason = will_retry ? 
		    EAGAIN : EIO;
			result = DDI_FAILURE;
		}
	} else {
		/* Success: copy the SMP response from scratch back to caller */
		(void) memcpy(smp_pkt->smp_pkt_rsp,
		    &((uint8_t *)pwp->scratch)[rdoff], rspsz);
		if (smp_pkt->smp_pkt_reason == EOVERFLOW) {
			result = DDI_FAILURE;
		} else {
			result = DDI_SUCCESS;
		}
	}
out:
	pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, pptr->target,
	    "%s: done for wwn 0x%" PRIx64, __func__, wwn);

	pmcs_unlock_phy(pptr);
	pmcs_release_scratch(pwp);
	return (result);
}

/*
 * SMP target init entry point (smp_hba_tran_t).
 *
 * Looks up the iport and target softstate for the child's "target-port"
 * unit address, claims a slot in pwp->targets[] on the first init,
 * calls pmcs_assign_device() to register the device, and sets the
 * SCSI_ADDR_PROP_ATTACHED_PORT property on the child node.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
pmcs_smp_init(dev_info_t *self, dev_info_t *child,
    smp_hba_tran_t *tran, smp_device_t *smp_sd)
{
	_NOTE(ARGUNUSED(tran, smp_sd));
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	pmcs_xscsi_t *tgt;
	pmcs_phy_t *phy, *pphy;
	uint64_t wwn;
	char *addr, *tgt_port;
	int ua_form = 1;	/* unit-address form for scsi_wwn_to_wwnstr */

	iport = ddi_get_soft_state(pmcs_iport_softstate,
	    ddi_get_instance(self));
	ASSERT(iport);
	if (iport == NULL)
		return (DDI_FAILURE);
	pwp = iport->pwp;
	ASSERT(pwp);
	if (pwp == NULL)
		return (DDI_FAILURE);

	/* Get "target-port" prop from devinfo node */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
		    "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
		/* Don't fail _smp_init() because we couldn't get/set a prop */
		return (DDI_SUCCESS);
	}

	/*
	 * Validate that this tran_tgt_init is for an active iport.
	 */
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Init on inactive iport for '%s'", __func__, tgt_port);
		ddi_prop_free(tgt_port);
		return (DDI_FAILURE);
	}

	mutex_enter(&pwp->lock);

	/* Retrieve softstate using unit-address */
	tgt = pmcs_get_target(iport, tgt_port, B_TRUE);
	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: tgt softstate not found", __func__);
		ddi_prop_free(tgt_port);
		mutex_exit(&pwp->lock);
		return (DDI_FAILURE);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)",
	    __func__, ddi_get_name(child), tgt_port);

	mutex_enter(&tgt->statlock);
	phy = tgt->phy;
	/* The target's PHY lock is expected to be held here */
	ASSERT(mutex_owned(&phy->phy_lock));

	if (IS_ROOT_PHY(phy)) {
		/* Expander attached to HBA - don't ref_count it */
		wwn = pwp->sas_wwns[0];
	} else {
		pmcs_inc_phy_ref_count(phy);

		/*
		 * Parent (in topology) is also an expander
		 * Now that we've increased the ref count on phy, it's OK
		 * to drop the lock so we can acquire the parent's lock.
		 */
		pphy = phy->parent;
		mutex_exit(&tgt->statlock);
		pmcs_unlock_phy(phy);
		pmcs_lock_phy(pphy);
		wwn = pmcs_barray2wwn(pphy->sas_address);
		pmcs_unlock_phy(pphy);
		pmcs_lock_phy(phy);
		mutex_enter(&tgt->statlock);
	}

	/*
	 * If this is the 1st smp_init, add this to our list.
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		/* Claim the first free slot in the target array */
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			tgt->assigned = 1;
			tgt->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "Target list full.");
			goto smp_init_fail;
		}
	}

	if (!pmcs_assign_device(pwp, tgt)) {
		pwp->targets[tgt->target_num] = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto smp_init_fail;
	}

	/*
	 * Update the attached port and target port pm properties
	 */
	tgt->smpd = smp_sd;

	pmcs_unlock_phy(phy);
	mutex_exit(&pwp->lock);

	tgt->ref_count++;
	tgt->dtype = phy->dtype;
	mutex_exit(&tgt->statlock);

	pmcs_update_phy_pm_props(phy, 0, 0, B_TRUE);

	addr = scsi_wwn_to_wwnstr(wwn, ua_form, NULL);
	if (smp_device_prop_update_string(smp_sd, SCSI_ADDR_PROP_ATTACHED_PORT,
	    addr) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to set "
		    "prop ("SCSI_ADDR_PROP_ATTACHED_PORT")", __func__);
	}
	(void) scsi_free_wwnstr(addr);
	ddi_prop_free(tgt_port);
	return (DDI_SUCCESS);

smp_init_fail:
	/* Undo the partial init: unlink target/PHY and drop the PHY ref */
	tgt->phy = NULL;
	tgt->target_num = PMCS_INVALID_TARGET_NUM;
	phy->target = NULL;
	if (!IS_ROOT_PHY(phy)) {
		pmcs_dec_phy_ref_count(phy);
	}
	mutex_exit(&tgt->statlock);
	pmcs_unlock_phy(phy);
	mutex_exit(&pwp->lock);
	ddi_soft_state_bystr_free(iport->tgt_sstate, tgt->unit_address);
	ddi_prop_free(tgt_port);
	return (DDI_FAILURE);
}

/*
 * SMP target free entry point (smp_hba_tran_t).
 */
static void
pmcs_smp_free(dev_info_t *self, dev_info_t *child,
    smp_hba_tran_t *tran, smp_device_t *smp)
{
	_NOTE(ARGUNUSED(tran, smp));
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	pmcs_xscsi_t *tgt;
	pmcs_phy_t *phyp;
	char *tgt_port;

	iport = ddi_get_soft_state(pmcs_iport_softstate,
	    ddi_get_instance(self));
	ASSERT(iport);
	if (iport == NULL)
		return;

	pwp = iport->pwp;
	if (pwp == NULL)
		return;
	/*
	 * NOTE(review): this ASSERT can never fire after the NULL check
	 * above; pmcs_smp_init() asserts *before* checking -- confirm
	 * which ordering is intended.
	 */
	ASSERT(pwp);

	/* Get "target-port" prop from devinfo node */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
		    "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
		return;
	}

	/* Retrieve softstate using unit-address */
	mutex_enter(&pwp->lock);
	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)", __func__,
	    ddi_get_name(child), tgt_port);
	ddi_prop_free(tgt_port);

	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: tgt softstate not found", __func__);
		mutex_exit(&pwp->lock);
		return;
	}

	phyp = tgt->phy;
	if (phyp) {
		mutex_enter(&phyp->phy_lock);
		/* Root PHYs were never ref-counted by pmcs_smp_init() */
		if (!IS_ROOT_PHY(phyp)) {
			pmcs_dec_phy_ref_count(phyp);
		}
	}
	mutex_enter(&tgt->statlock);

	if (--tgt->ref_count == 0) {
		/*
		 * Remove this target from our list. The softstate
		 * will remain, and the device will remain registered
		 * with the hardware unless/until we're told that the
		 * device physically went away.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt,
		    "Removing target 0x%p (vtgt %d) from target list",
		    (void *)tgt, tgt->target_num);
		pwp->targets[tgt->target_num] = NULL;
		tgt->target_num = PMCS_INVALID_TARGET_NUM;
		if (phyp) {
			phyp->target = NULL;
		}
		tgt->phy = NULL;
		pmcs_destroy_target(tgt);
	} else {
		mutex_exit(&tgt->statlock);
	}

	if (phyp) {
		mutex_exit(&phyp->phy_lock);
	}
	mutex_exit(&pwp->lock);
}

/*
 * scsi_hba_tran_t tran_quiesce entry point.
 *
 * Blocks new command submission (pwp->blocked) and waits for all
 * active commands on all targets to drain. Returns 0 on success,
 * -1 if the HBA softstate is missing or not in STATE_RUNNING.
 */
static int
pmcs_scsi_quiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;
	int totactive = -1;
	pmcs_xscsi_t *xp;
	uint16_t target;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
	pwp->quiesced = pwp->blocked = 1;
	/* Loop until no target has active commands outstanding */
	while (totactive) {
		totactive = 0;
		for (target = 0; target < pwp->max_dev; target++) {
			xp = pwp->targets[target];
			if (xp == NULL) {
				continue;
			}
			mutex_enter(&xp->statlock);
			if (xp->actv_cnt) {
				totactive += xp->actv_cnt;
				xp->draining = 1;
			}
			mutex_exit(&xp->statlock);
		}
		if (totactive) {
			/* Completion path signals drain_cv as counts drop */
			cv_wait(&pwp->drain_cv, &pwp->lock);
		}
		/*
		 * The pwp->blocked may have been reset.
e.g. a SCSI bus reset
		 */
		pwp->blocked = 1;
	}

	/* Drain complete: clear the draining flag on every target */
	for (target = 0; target < pwp->max_dev; target++) {
		xp = pwp->targets[target];
		if (xp == NULL) {
			continue;
		}
		mutex_enter(&xp->statlock);
		xp->draining = 0;
		mutex_exit(&xp->statlock);
	}

	mutex_exit(&pwp->lock);
	if (totactive == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s drain complete", __func__);
	}
	return (0);
}

/*
 * scsi_hba_tran_t tran_unquiesce entry point.
 *
 * Clears the blocked/quiesced flags set by pmcs_scsi_quiesce() and
 * restarts the wait and completion queues. Returns 0 on success,
 * -1 if the HBA softstate is missing or not in STATE_RUNNING.
 */
static int
pmcs_scsi_unquiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
	pwp->blocked = pwp->quiesced = 0;
	mutex_exit(&pwp->lock);

	/*
	 * Run all pending commands.
	 */
	pmcs_scsa_wq_run(pwp);

	/*
	 * Complete all completed commands.
	 * This also unlocks us.
	 */
	PMCS_CQ_RUN(pwp);
	return (0);
}

/*
 * Start commands for a particular device
 * If the actual start of a command fails, return B_FALSE. Any other result
 * is a B_TRUE return.
 */
boolean_t
pmcs_scsa_wq_run_one(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
	pmcs_cmd_t *sp;
	pmcs_phy_t *phyp;
	pmcwork_t *pwrk;
	boolean_t run_one, blocked;
	int rval;

	/*
	 * First, check to see if we're blocked or resource limited
	 */
	mutex_enter(&pwp->lock);
	blocked = pwp->blocked;
	/*
	 * If resource_limited is set, we're resource constrained and
	 * we will run only one work request for this target.
	 */
	run_one = pwp->resource_limited;
	mutex_exit(&pwp->lock);

	if (blocked) {
		/* Queues will get restarted when we get unblocked */
		return (B_TRUE);
	}

	/*
	 * Might as well verify the queue is not empty before moving on
	 */
	mutex_enter(&xp->wqlock);
	if (STAILQ_EMPTY(&xp->wq)) {
		mutex_exit(&xp->wqlock);
		return (B_TRUE);
	}
	mutex_exit(&xp->wqlock);

	/*
	 * If we're draining or resetting, just reschedule work queue and bail.
	 */
	mutex_enter(&xp->statlock);
	if (xp->draining || xp->resetting || xp->special_running ||
	    xp->special_needed) {
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (B_TRUE);
	}

	/*
	 * Next, check to see if the target is gone.
	 */
	if (xp->dev_gone) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Flushing wait queue for dead tgt 0x%p", __func__,
		    (void *)xp);
		pmcs_flush_target_queues(pwp, xp, PMCS_TGT_WAIT_QUEUE);
		mutex_exit(&xp->statlock);
		return (B_TRUE);
	}

	/*
	 * Increment the PHY's ref_count now so we know it won't go away
	 * after we drop the target lock. Drop it before returning. If the
	 * PHY dies, the commands we attempt to send will fail, but at least
	 * we know we have a real PHY pointer.
	 */
	phyp = xp->phy;
	pmcs_inc_phy_ref_count(phyp);
	mutex_exit(&xp->statlock);

	mutex_enter(&xp->wqlock);
	while ((sp = STAILQ_FIRST(&xp->wq)) != NULL) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_CBACK, phyp);
		if (pwrk == NULL) {
			/* Out of work structures: mark resource-limited */
			mutex_exit(&xp->wqlock);
			mutex_enter(&pwp->lock);
			if (pwp->resource_limited == 0) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
				    "%s: out of work structures", __func__);
			}
			pwp->resource_limited = 1;
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
			mutex_exit(&pwp->lock);
			return (B_FALSE);
		}
		STAILQ_REMOVE_HEAD(&xp->wq, cmd_next);
		mutex_exit(&xp->wqlock);

		pwrk->xp = xp;
		pwrk->arg = sp;
		sp->cmd_tag = pwrk->htag;
		/* Default to a one-second timeout when pkt_time is zero */
		pwrk->timer = US2WT(CMD2PKT(sp)->pkt_time * 1000000);
		if (pwrk->timer == 0) {
			pwrk->timer = US2WT(1000000);
		}

		pwrk->dtype = xp->dtype;

		if (xp->dtype == SAS) {
			pwrk->ptr = (void *) pmcs_SAS_done;
			if ((rval = pmcs_SAS_run(sp, pwrk)) != 0) {
				sp->cmd_tag = NULL;
				pmcs_dec_phy_ref_count(phyp);
				pmcs_pwork(pwp, pwrk);
				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
				if (rval == PMCS_WQ_RUN_FAIL_RES) {
					return (B_FALSE);
				} else {
					return (B_TRUE);
				}
			}
		} else {
			ASSERT(xp->dtype == SATA);
			pwrk->ptr = (void *) pmcs_SATA_done;
			if ((rval = pmcs_SATA_run(sp, pwrk)) != 0) {
				sp->cmd_tag = NULL;
				pmcs_dec_phy_ref_count(phyp);
				pmcs_pwork(pwp, pwrk);
				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
				if (rval == PMCS_WQ_RUN_FAIL_RES) {
					return (B_FALSE);
				} else {
					return (B_TRUE);
				}
			}
		}

		if (run_one) {
			goto wq_out;
		}
		mutex_enter(&xp->wqlock);
	}

	mutex_exit(&xp->wqlock);

wq_out:
	pmcs_dec_phy_ref_count(phyp);
	return (B_TRUE);
}

/*
 * Start commands for all devices.
 */
void
pmcs_scsa_wq_run(pmcs_hw_t *pwp)
{
	pmcs_xscsi_t *xp;
	uint16_t target_start, target;
	boolean_t rval = B_TRUE;

	mutex_enter(&pwp->lock);
	/* Resume the round-robin scan where the previous call stopped */
	target_start = pwp->last_wq_dev;
	target = target_start;

	do {
		xp = pwp->targets[target];
		if ((xp == NULL) || (STAILQ_EMPTY(&xp->wq))) {
			if (++target == pwp->max_dev) {
				target = 0;
			}
			continue;
		}

		mutex_exit(&pwp->lock);
		rval = pmcs_scsa_wq_run_one(pwp, xp);
		mutex_enter(&pwp->lock);

		if (rval == B_FALSE) {
			break;
		}

		if (++target == pwp->max_dev) {
			target = 0;
		}
	} while (target != target_start);

	if (rval) {
		/*
		 * If we were resource limited, but apparently are not now,
		 * reschedule the work queues anyway.
		 */
		if (pwp->resource_limited) {
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		}
		pwp->resource_limited = 0; /* Not resource-constrained */
	} else {
		/*
		 * Give everybody a chance, and reschedule to run the queues
		 * again as long as we're limited.
		 */
		pwp->resource_limited = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
	}

	pwp->last_wq_dev = target;
	mutex_exit(&pwp->lock);
}

/*
 * Pull the completion queue, drop the lock and complete all elements.
 */

void
pmcs_scsa_cq_run(void *arg)
{
	pmcs_cq_thr_info_t *cqti = (pmcs_cq_thr_info_t *)arg;
	pmcs_hw_t *pwp = cqti->cq_pwp;
	pmcs_cmd_t *sp, *nxt;
	struct scsi_pkt *pkt;
	pmcs_xscsi_t *tgt;
	pmcs_iocomp_cb_t *ioccb, *ioccb_next;
	pmcs_cb_t callback;

	DTRACE_PROBE1(pmcs__scsa__cq__run__start, pmcs_cq_thr_info_t *, cqti);

	mutex_enter(&pwp->cq_lock);

	while (!pwp->cq_info.cq_stop) {
		/*
		 * First, check the I/O completion callback queue.
		 */
		ioccb = pwp->iocomp_cb_head;
		pwp->iocomp_cb_head = NULL;
		pwp->iocomp_cb_tail = NULL;
		mutex_exit(&pwp->cq_lock);

		while (ioccb) {
			/*
			 * Grab the lock on the work structure. The callback
			 * routine is responsible for clearing it.
			 */
			mutex_enter(&ioccb->pwrk->lock);
			ioccb_next = ioccb->next;
			callback = (pmcs_cb_t)ioccb->pwrk->ptr;
			(*callback)(pwp, ioccb->pwrk,
			    (uint32_t *)((void *)ioccb->iomb));
			kmem_cache_free(pwp->iocomp_cb_cache, ioccb);
			ioccb = ioccb_next;
		}

		/*
		 * Next, run the completion queue
		 */
		mutex_enter(&pwp->cq_lock);
		sp = STAILQ_FIRST(&pwp->cq);
		STAILQ_INIT(&pwp->cq);
		mutex_exit(&pwp->cq_lock);

		DTRACE_PROBE1(pmcs__scsa__cq__run__start__loop,
		    pmcs_cq_thr_info_t *, cqti);

		if (sp && pmcs_check_acc_dma_handle(pwp)) {
			ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
		}

		while (sp) {
			nxt = STAILQ_NEXT(sp, cmd_next);
			pkt = CMD2PKT(sp);
			tgt = sp->cmd_target;
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, tgt,
			    "%s: calling completion on %p for tgt %p", __func__,
			    (void *)sp, (void *)tgt);
			if (tgt) {
				mutex_enter(&tgt->statlock);
				ASSERT(tgt->actv_pkts != 0);
				tgt->actv_pkts--;
				mutex_exit(&tgt->statlock);
			}
			scsi_hba_pkt_comp(pkt);
			sp = nxt;
		}

		DTRACE_PROBE1(pmcs__scsa__cq__run__end__loop,
		    pmcs_cq_thr_info_t *, cqti);

		/*
		 * Check if there are more completions to do. If so, and we've
		 * not been told to stop, skip the wait and cycle through again.
		 */
		mutex_enter(&pwp->cq_lock);
		if ((pwp->iocomp_cb_head == NULL) && STAILQ_EMPTY(&pwp->cq) &&
		    !pwp->cq_info.cq_stop) {
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&cqti->cq_thr_lock);
			cv_wait(&cqti->cq_cv, &cqti->cq_thr_lock);
			mutex_exit(&cqti->cq_thr_lock);
			mutex_enter(&pwp->cq_lock);
		}
	}

	mutex_exit(&pwp->cq_lock);
	DTRACE_PROBE1(pmcs__scsa__cq__run__stop, pmcs_cq_thr_info_t *, cqti);
	thread_exit();
}

/*
 * Run a SAS command. Called with pwrk->lock held, returns unlocked.
 */
static int
pmcs_SAS_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
	pmcs_hw_t *pwp = CMD2PMC(sp);
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_xscsi_t *xp = pwrk->xp;
	uint32_t iq, *ptr;
	sas_ssp_cmd_iu_t sc;

	ASSERT(xp != NULL);
	mutex_enter(&xp->statlock);
	if (!xp->assigned) {
		mutex_exit(&xp->statlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	if ((xp->actv_cnt >= xp->qdepth) || xp->recover_wait) {
		/* Queue depth reached or in recovery: requeue at the head */
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	GET_IO_IQ_ENTRY(pwp, ptr, pwrk->phy->device_id, iq);
	if (ptr == NULL) {
		mutex_exit(&xp->statlock);
		/*
		 * This is a temporary failure not likely to unblocked by
		 * commands completing as the test for scheduling the
		 * restart of work is a per-device test.
		 */
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Failed to get IO IQ entry for tgt %d",
		    __func__, xp->target_num);
		return (PMCS_WQ_RUN_FAIL_RES);

	}

	/* Build the SSP INI IO START IOMB in the inbound queue entry */
	ptr[0] =
	    LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SSP_INI_IO_START));
	ptr[1] = LE_32(pwrk->htag);
	ptr[2] = LE_32(pwrk->phy->device_id);
	ptr[3] = LE_32(pkt->pkt_dma_len);
	if (ptr[3]) {
		ASSERT(pkt->pkt_numcookies);
		if (pkt->pkt_dma_flags & DDI_DMA_READ) {
			ptr[4] = LE_32(PMCIN_DATADIR_2_INI);
		} else {
			ptr[4] = LE_32(PMCIN_DATADIR_2_DEV);
		}
		if (pmcs_dma_load(pwp, sp, ptr)) {
			/* GET_IO_IQ_ENTRY took iqp_lock; release it on fail */
			mutex_exit(&pwp->iqp_lock[iq]);
			mutex_exit(&xp->statlock);
			mutex_enter(&xp->wqlock);
			if (STAILQ_EMPTY(&xp->wq)) {
				STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
				mutex_exit(&xp->wqlock);
			} else {
				/* Complete the command back with QFULL */
				mutex_exit(&xp->wqlock);
				CMD2PKT(sp)->pkt_scbp[0] = STATUS_QFULL;
				CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
				CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS |
				    STATE_GOT_TARGET | STATE_SENT_CMD |
				    STATE_GOT_STATUS;
				mutex_enter(&pwp->cq_lock);
				STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
				PMCS_CQ_RUN_LOCKED(pwp);
				mutex_exit(&pwp->cq_lock);
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
				    "%s: Failed to dma_load for tgt %d (QF)",
				    __func__, xp->target_num);
			}
			return (PMCS_WQ_RUN_FAIL_RES);
		}
	} else {
		ptr[4] = LE_32(PMCIN_DATADIR_NONE);
		CLEAN_MESSAGE(ptr, 12);
	}
	xp->actv_cnt++;
	if (xp->actv_cnt > xp->maxdepth) {
		xp->maxdepth = xp->actv_cnt;
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp, "%s: max depth "
		    "now %u", pwrk->phy->path, xp->maxdepth);
	}
	mutex_exit(&xp->statlock);


#ifdef DEBUG
	/*
	 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
	 * event when this goes out on the wire.
	 */
	ptr[4] |= PMCIN_MESSAGE_REPORT;
#endif
	/*
	 * Fill in the SSP IU
	 */

	bzero(&sc, sizeof (sas_ssp_cmd_iu_t));
	bcopy((uint8_t *)&sp->cmd_lun->scsi_lun, sc.lun, sizeof (scsi_lun_t));

	/* Map SCSA tag flags to the SAS task attribute */
	switch (pkt->pkt_flags & FLAG_TAGMASK) {
	case FLAG_HTAG:
		sc.task_attribute = SAS_CMD_TASK_ATTR_HEAD;
		break;
	case FLAG_OTAG:
		sc.task_attribute = SAS_CMD_TASK_ATTR_ORDERED;
		break;
	case FLAG_STAG:
	default:
		sc.task_attribute = SAS_CMD_TASK_ATTR_SIMPLE;
		break;
	}
	(void) memcpy(sc.cdb, pkt->pkt_cdbp,
	    min(SCSA_CDBLEN(sp), sizeof (sc.cdb)));
	(void) memcpy(&ptr[5], &sc, sizeof (sas_ssp_cmd_iu_t));
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	mutex_exit(&pwrk->lock);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: giving pkt %p (tag %x) to the hardware", __func__,
	    (void *)pkt, pwrk->htag);
#ifdef DEBUG
	pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SAS INI Message", ptr);
#endif
	/* Put the command on the active queue before ringing the doorbell */
	mutex_enter(&xp->aqlock);
	STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
	mutex_exit(&xp->aqlock);
	INC_IQ_ENTRY(pwp, iq);

	/*
	 * If we just submitted the last command queued from device state
	 * recovery, clear the wq_recovery_tail pointer.
	 */
	mutex_enter(&xp->wqlock);
	if (xp->wq_recovery_tail == sp) {
		xp->wq_recovery_tail = NULL;
	}
	mutex_exit(&xp->wqlock);

	return (PMCS_WQ_RUN_SUCCESS);
}

/*
 * Complete a SAS command
 *
 * Called with pwrk lock held.
 * The free of pwrk releases the lock.
 */

static void
pmcs_SAS_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
{
	pmcs_cmd_t *sp = pwrk->arg;
	pmcs_phy_t *pptr = pwrk->phy;
	pmcs_xscsi_t *xp = pwrk->xp;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	int dead;
	uint32_t sts;
	boolean_t aborted = B_FALSE;
	boolean_t do_ds_recovery = B_FALSE;

	ASSERT(xp != NULL);
	ASSERT(sp != NULL);
	ASSERT(pptr != NULL);

	DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
	    (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
	    hrtime_t, gethrtime());

	dead = pwrk->dead;

	/* msg may be NULL; in that case treat the status as OK (0) */
	if (msg) {
		sts = LE_32(msg[2]);
	} else {
		sts = 0;
	}

	if (dead != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag "
		    "0x%x for %s", __func__, pwrk->htag, pptr->path);
		goto out;
	}

	if (sts == PMCOUT_STATUS_ABORTED) {
		aborted = B_TRUE;
	}

	if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: cmd 0x%p (tag 0x%x) timed out for %s",
		    __func__, (void *)sp, pwrk->htag, pptr->path);
		CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
		CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
		goto out;
	}

	/*
	 * If the status isn't okay but not underflow,
	 * step to the side and parse the (possible) error.
	 */
#ifdef DEBUG
	if (msg) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
	}
#endif
	if (!msg) {
		goto out;
	}

	switch (sts) {
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
	case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: PHY %s requires DS recovery (status=%d)",
		    __func__, pptr->path, sts);
		do_ds_recovery = B_TRUE;
		break;
	case PMCOUT_STATUS_UNDERFLOW:
		/* Underflow is not fatal: record the residual and treat OK */
		(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, LE_32(msg[3]));
		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, NULL, NULL,
		    "%s: underflow %u for cdb 0x%x",
		    __func__, LE_32(msg[3]), pkt->pkt_cdbp[0] & 0xff);
		sts = PMCOUT_STATUS_OK;
		msg[3] = 0;
		break;
	case PMCOUT_STATUS_OK:
		pkt->pkt_resid = 0;
		break;
	}

	if (sts != PMCOUT_STATUS_OK) {
		pmcs_ioerror(pwp, SAS, pwrk, msg, sts);
	} else {
		/* msg[3] non-zero means an SSP response frame is present */
		if (msg[3]) {
			uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
			sas_ssp_rsp_iu_t *rptr = (void *)local;
			const int lim =
			    (PMCS_QENTRY_SIZE << 1) - SAS_RSP_HDR_SIZE;
			static const uint8_t ssp_rsp_evec[] = {
				0x58, 0x61, 0x56, 0x72, 0x00
			};

			/*
			 * Transform the first part of the response
			 * to host canonical form. This gives us enough
			 * information to figure out what to do with the
			 * rest (which remains unchanged in the incoming
			 * message which can be up to two queue entries
			 * in length).
			 */
			pmcs_endian_transform(pwp, local, &msg[5],
			    ssp_rsp_evec);
			xd = (uint8_t *)(&msg[5]);
			xd += SAS_RSP_HDR_SIZE;

			if (rptr->datapres == SAS_RSP_DATAPRES_RESPONSE_DATA) {
				if (rptr->response_data_length != 4) {
					pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
					    "Bad SAS RESPONSE DATA LENGTH",
					    msg);
					pkt->pkt_reason = CMD_TRAN_ERR;
					goto out;
				}
				(void) memcpy(&sts, xd, sizeof (uint32_t));
				sts = BE_32(sts);
				/*
				 * The only response code we should legally get
				 * here is an INVALID FRAME response code.
				 */
				if (sts == SAS_RSP_INVALID_FRAME) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: pkt %p tgt %u path %s "
					    "completed: INVALID FRAME response",
					    __func__, (void *)pkt,
					    xp->target_num, pptr->path);
				} else {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: pkt %p tgt %u path %s "
					    "completed: illegal response 0x%x",
					    __func__, (void *)pkt,
					    xp->target_num, pptr->path, sts);
				}
				pkt->pkt_reason = CMD_TRAN_ERR;
				goto out;
			}
			if (rptr->datapres == SAS_RSP_DATAPRES_SENSE_DATA) {
				uint32_t slen;
				slen = rptr->sense_data_length;
				if (slen > lim) {
					slen = lim;
				}
				pmcs_latch_status(pwp, sp, rptr->status, xd,
				    slen, pptr->path);
			} else if (rptr->datapres == SAS_RSP_DATAPRES_NO_DATA) {
				pmcout_ssp_comp_t *sspcp;
				sspcp = (pmcout_ssp_comp_t *)msg;
				uint32_t *residp;
				/*
				 * This is the case for a plain SCSI status.
				 * Note: If RESC_V is set and we're here, there
				 * is a residual. We need to find it and update
				 * the packet accordingly.
				 */
				pmcs_latch_status(pwp, sp, rptr->status, NULL,
				    0, pptr->path);

				if (sspcp->resc_v) {
					/*
					 * Point residual to the SSP_RESP_IU
					 */
					residp = (uint32_t *)(sspcp + 1);
					/*
					 * param contains the number of bytes
					 * between where the SSP_RESP_IU may
					 * or may not be and the residual.
					 * Increment residp by the appropriate
					 * number of words: (param+resc_pad)/4).
					 */
					residp += (LE_32(sspcp->param) +
					    sspcp->resc_pad) /
					    sizeof (uint32_t);
					pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW,
					    pptr, xp, "%s: tgt 0x%p "
					    "residual %d for pkt 0x%p",
					    __func__, (void *) xp, *residp,
					    (void *) pkt);
					ASSERT(LE_32(*residp) <=
					    pkt->pkt_dma_len);
					(void) pmcs_set_resid(pkt,
					    pkt->pkt_dma_len, LE_32(*residp));
				}
			} else {
				pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
				    "illegal SAS response", msg);
				pkt->pkt_reason = CMD_TRAN_ERR;
				goto out;
			}
		} else {
			pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
			    pptr->path);
		}
		if (pkt->pkt_dma_len) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
	    "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
	    __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
	    pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);

	if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
		    __func__, (void *)pkt, pptr->path, (void *)pwrk);
		aborted = B_TRUE;
	}

out:
	pmcs_dma_unload(pwp, sp);
	mutex_enter(&xp->statlock);

	/*
	 * If the device no longer has a PHY pointer, clear the PHY pointer
	 * from the work structure before we free it. Otherwise, pmcs_pwork
	 * may decrement the ref_count on a PHY that's been freed.
	 */
	if (xp->phy == NULL) {
		pwrk->phy = NULL;
	}

	/*
	 * We may arrive here due to a command timing out, which in turn
	 * could be addressed in a different context. So, free the work
	 * back, but only after confirming it's not already been freed
	 * elsewhere.
	 */
	if (pwrk->htag != PMCS_TAG_FREE) {
		pmcs_pwork(pwp, pwrk);
	}

	/*
	 * If the device is gone, we only put this command on the completion
	 * queue if the work structure is not marked dead. If it's marked
	 * dead, it will already have been put there.
	 */
	if (xp->dev_gone) {
		mutex_exit(&xp->statlock);
		if (!dead) {
			mutex_enter(&xp->aqlock);
			STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
			mutex_exit(&xp->aqlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq",
			    __func__, (void *)sp, sp->cmd_tag);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
			    "%s: Completing command for dead target 0x%p",
			    __func__, (void *)xp);
		}
		return;
	}

	ASSERT(xp->actv_cnt > 0);
	if (--(xp->actv_cnt) == 0) {
		if (xp->draining) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp,
			    "%s: waking up drain waiters", __func__);
			/* pmcs_scsi_quiesce() waits on drain_cv */
			cv_signal(&pwp->drain_cv);
		}
	}
	mutex_exit(&xp->statlock);

	/*
	 * If the status is other than OK, determine if it's something that
	 * is worth re-attempting enumeration. If so, mark the PHY.
	 */
	if (sts != PMCOUT_STATUS_OK) {
		pmcs_status_disposition(pptr, sts);
	}

	if (dead == 0) {
#ifdef DEBUG
		pmcs_cmd_t *wp;
		mutex_enter(&xp->aqlock);
		/* Sanity: the command must still be on the active queue */
		STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
			if (wp == sp) {
				break;
			}
		}
		ASSERT(wp != NULL);
#else
		mutex_enter(&xp->aqlock);
#endif
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
		    "%s: Removing cmd 0x%p (htag 0x%x) from aq", __func__,
		    (void *)sp, sp->cmd_tag);
		STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
		if (aborted) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
			    "%s: Aborted cmd for tgt 0x%p, signaling waiters",
			    __func__, (void *)xp);
			cv_signal(&xp->abort_cv);
		}
		mutex_exit(&xp->aqlock);
	}

	/*
	 * If do_ds_recovery is set, we need to initiate device state
	 * recovery. In this case, we put this I/O back on the head of
	 * the wait queue to run again after recovery is complete
	 */
	if (do_ds_recovery) {
		mutex_enter(&xp->statlock);
		pmcs_start_dev_state_recovery(xp, pptr);
		mutex_exit(&xp->statlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp, "%s: Putting cmd 0x%p "
		    "back on wq during recovery for tgt 0x%p", __func__,
		    (void *)sp, (void *)xp);
		mutex_enter(&xp->wqlock);
		if (xp->wq_recovery_tail == NULL) {
			STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		} else {
			/*
			 * If there are other I/Os waiting at the head due to
			 * device state recovery, add this one in the right spot
			 * to maintain proper order.
		 */
		STAILQ_INSERT_AFTER(&xp->wq, xp->wq_recovery_tail, sp,
		    cmd_next);
	}
	/* This command is now the new tail of the recovery run */
	xp->wq_recovery_tail = sp;
	mutex_exit(&xp->wqlock);
} else {
	/*
	 * If we're not initiating device state recovery and this
	 * command was not "dead", put it on the completion queue
	 */
	if (!dead) {
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
	}
}
}

/*
 * Run a SATA command (normal reads and writes),
 * or block and schedule a SATL interpretation
 * of the command.
 *
 * sp is the command to start and pwrk is its already-allocated work
 * structure.  Returns PMCS_WQ_RUN_SUCCESS when the command was either
 * handed to the hardware or completed/queued inline, or a
 * PMCS_WQ_RUN_FAIL_* code when the caller must retry it later (the
 * command has been re-queued on the target's wait queue in that case).
 *
 * Called with pwrk lock held, returns unlocked.
 */

static int
pmcs_SATA_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
	pmcs_hw_t *pwp = CMD2PMC(sp);
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_xscsi_t *xp;
	uint8_t cdb_base, asc, tag;
	uint32_t *ptr, iq, nblk, i, mtype;
	fis_t fis;
	size_t amt;
	uint64_t lba;

	xp = pwrk->xp;
	ASSERT(xp != NULL);

	/*
	 * First, see if this is just a plain read/write command.
	 * If not, we have to queue it up for processing, block
	 * any additional commands from coming in, and wake up
	 * the thread that will process this command.
	 */
	cdb_base = pkt->pkt_cdbp[0] & 0x1f;
	if (cdb_base != SCMD_READ && cdb_base != SCMD_WRITE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL,
		    "%s: special SATA cmd %p", __func__, (void *)sp);

		ASSERT(xp->phy != NULL);
		/*
		 * Non-read/write commands go on the special queue (sq)
		 * for SATL processing; the work structure is freed here
		 * since the special path allocates its own.
		 */
		pmcs_pwork(pwp, pwrk);
		pmcs_lock_phy(xp->phy);
		mutex_enter(&xp->statlock);
		xp->special_needed = 1; /* Set the special_needed flag */
		STAILQ_INSERT_TAIL(&xp->sq, sp, cmd_next);
		if (pmcs_run_sata_special(pwp, xp)) {
			SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
		}
		mutex_exit(&xp->statlock);
		pmcs_unlock_phy(xp->phy);

		return (PMCS_WQ_RUN_SUCCESS);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: regular cmd", __func__);

	mutex_enter(&xp->statlock);
	if (!xp->assigned) {
		/* Target not (or no longer) bound to a PHY */
		mutex_exit(&xp->statlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	if (xp->special_running || xp->special_needed || xp->recover_wait) {
		/*
		 * Hold regular I/O off while special commands or device
		 * recovery are in flight; re-queue at the head so ordering
		 * is preserved.
		 */
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		/*
		 * By the time we get here the special
		 * commands running or waiting to be run
		 * may have come and gone, so kick our
		 * worker to run the waiting queues
		 * just in case.
		 */
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	lba = xp->capacity;
	mutex_exit(&xp->statlock);

	/*
	 * Extract data length and lba parameters out of the command. The
	 * function pmcs_SATA_rwparm returns a non-zero ASC value if the CDB
	 * values are considered illegal.
	 */
	asc = pmcs_SATA_rwparm(pkt->pkt_cdbp, &nblk, &lba, lba);
	if (asc) {
		uint8_t sns[18];
		/*
		 * Synthesize a CHECK CONDITION with ILLEGAL REQUEST
		 * (sense key 0x5) and the ASC returned above, then
		 * complete the command without touching the hardware.
		 */
		bzero(sns, sizeof (sns));
		sns[0] = 0xf0;
		sns[2] = 0x5;
		sns[12] = asc;
		pmcs_latch_status(pwp, sp, STATUS_CHECK, sns, sizeof (sns),
		    pwrk->phy->path);
		pmcs_pwork(pwp, pwrk);
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
		return (PMCS_WQ_RUN_SUCCESS);
	}

	/*
	 * If the command decodes as not moving any data, complete it here.
	 * amt is the block count converted to bytes (512-byte blocks).
	 */
	amt = nblk;
	amt <<= 9;
	amt = pmcs_set_resid(pkt, amt, nblk << 9);
	if (amt == 0) {
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
		    pwrk->phy->path);
		pmcs_pwork(pwp, pwrk);
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
		return (PMCS_WQ_RUN_SUCCESS);
	}

	/*
	 * Get an inbound queue entry for this I/O.  On success this
	 * returns with the inbound queue lock (iqp_lock[iq]) held.
	 */
	GET_IO_IQ_ENTRY(pwp, ptr, xp->phy->device_id, iq);
	if (ptr == NULL) {
		/*
		 * This is a temporary failure not likely to be unblocked by
		 * commands completing as the test for scheduling the
		 * restart of work is a per-device test.
		 */
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_dma_unload(pwp, sp);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Failed to get IO IQ entry for tgt %d",
		    __func__, xp->target_num);
		return (PMCS_WQ_RUN_FAIL_RES);
	}

	/*
	 * Get a tag. At this point, hold statlock until the tagmap is
	 * updated (just prior to sending the cmd to the hardware).
	 */
	mutex_enter(&xp->statlock);
	for (tag = 0; tag < xp->qdepth; tag++) {
		if ((xp->tagmap & (1 << tag)) == 0) {
			break;
		}
	}

	if (tag == xp->qdepth) {
		/*
		 * All SATL tags in use: give back the IQ entry and
		 * re-queue the command.
		 * NOTE(review): pwrk is neither freed nor unlocked on
		 * this path -- presumably the caller disposes of it on a
		 * PMCS_WQ_RUN_FAIL_* return; confirm against callers.
		 */
		mutex_exit(&xp->statlock);
		mutex_exit(&pwp->iqp_lock[iq]);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}

	sp->cmd_satltag = (uint8_t)tag;

	/*
	 * Set up the command: IOMB header first, then the host-to-device
	 * register FIS describing the transfer.
	 */
	bzero(fis, sizeof (fis));
	ptr[0] =
	    LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SATA_HOST_IO_START));
	ptr[1] = LE_32(pwrk->htag);
	ptr[2] = LE_32(pwrk->phy->device_id);
	ptr[3] = LE_32(amt);

	if (xp->ncq) {
		/*
		 * NCQ-capable device: use READ/WRITE FPDMA QUEUED.  The
		 * NCQ tag rides in bits 16.. of the message type word and
		 * in the FIS sector-count field (fis[3], tag << 3).
		 */
		mtype = SATA_PROTOCOL_FPDMA | (tag << 16);
		fis[0] = ((nblk & 0xff) << 24) | (C_BIT << 8) | FIS_REG_H2DEV;
		if (cdb_base == SCMD_READ) {
			fis[0] |= (READ_FPDMA_QUEUED << 16);
		} else {
			fis[0] |= (WRITE_FPDMA_QUEUED << 16);
		}
		fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff);
		fis[2] = ((nblk & 0xff00) << 16) | ((lba >> 24) & 0xffffff);
		fis[3] = tag << 3;
	} else {
		int op;
		/* Non-NCQ: choose PIO or DMA extended read/write */
		fis[0] = (C_BIT << 8) | FIS_REG_H2DEV;
		if (xp->pio) {
			mtype = SATA_PROTOCOL_PIO;
			if (cdb_base == SCMD_READ) {
				op = READ_SECTORS_EXT;
			} else {
				op = WRITE_SECTORS_EXT;
			}
		} else {
			mtype = SATA_PROTOCOL_DMA;
			if (cdb_base == SCMD_READ) {
				op = READ_DMA_EXT;
			} else {
				op = WRITE_DMA_EXT;
			}
		}
		fis[0] |= (op << 16);
		fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff);
		fis[2] = (lba >> 24) & 0xffffff;
		fis[3] = nblk;
	}

	if (cdb_base == SCMD_READ) {
		ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_INI);
	} else {
		ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_DEV);
	}
#ifdef	DEBUG
	/*
	 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
	 * event when this goes out on the wire.
	 */
	ptr[4] |= PMCIN_MESSAGE_REPORT;
#endif
	/* Copy the FIS into the IOMB, converting to little-endian */
	for (i = 0; i < (sizeof (fis_t))/(sizeof (uint32_t)); i++) {
		ptr[i+5] = LE_32(fis[i]);
	}
	if (pmcs_dma_load(pwp, sp, ptr)) {
		/*
		 * Could not build the scatter/gather entries; give the
		 * IQ entry back and re-queue the command for retry.
		 * NOTE(review): as with tag exhaustion above, pwrk is
		 * left held here -- confirm caller disposition.
		 */
		mutex_exit(&xp->statlock);
		mutex_exit(&pwp->iqp_lock[iq]);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Failed to dma_load for tgt %d",
		    __func__, xp->target_num);
		return (PMCS_WQ_RUN_FAIL_RES);

	}

	/* Commit: mark on-chip, claim the tag, and track active count */
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	mutex_exit(&pwrk->lock);
	xp->tagmap |= (1 << tag);
	xp->actv_cnt++;
	if (xp->actv_cnt > xp->maxdepth) {
		xp->maxdepth = xp->actv_cnt;
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp,
		    "%s: max depth now %u", pwrk->phy->path, xp->maxdepth);
	}
	mutex_exit(&xp->statlock);
	mutex_enter(&xp->aqlock);
	STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
	mutex_exit(&xp->aqlock);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: giving pkt %p to hardware", __func__, (void *)pkt);
#ifdef DEBUG
	pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SATA INI Message", ptr);
#endif
	/* Ring the doorbell; this also drops iqp_lock[iq] */
	INC_IQ_ENTRY(pwp, iq);

	return (PMCS_WQ_RUN_SUCCESS);
}

/*
 * Complete a SATA command. Called with pwrk lock held.
 */
void
pmcs_SATA_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
{
	pmcs_cmd_t *sp = pwrk->arg;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_phy_t *pptr = pwrk->phy;
	int dead;
	uint32_t sts;
	pmcs_xscsi_t *xp;
	boolean_t aborted = B_FALSE;

	xp = pwrk->xp;
	ASSERT(xp != NULL);

	DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
	    (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
	    hrtime_t, gethrtime());

	/* Snapshot the dead flag; it is consulted again after "out:" */
	dead = pwrk->dead;

	/* msg may be NULL (e.g. completion without an outbound IOMB) */
	if (msg) {
		sts = LE_32(msg[2]);
	} else {
		sts = 0;
	}

	if (dead != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag "
		    "0x%x for %s", __func__, pwrk->htag, pptr->path);
		goto out;
	}
	if ((pwrk->state == PMCS_WORK_STATE_TIMED_OUT) &&
	    (sts != PMCOUT_STATUS_ABORTED)) {
		/*
		 * The command timed out and this completion is not the
		 * abort's: report the timeout to SCSA.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: cmd 0x%p (tag 0x%x) timed out for %s",
		    __func__, (void *)sp, pwrk->htag, pptr->path);
		CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
		/* pkt_reason already set to CMD_TIMEOUT */
		ASSERT(CMD2PKT(sp)->pkt_reason == CMD_TIMEOUT);
		CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
		goto out;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, "%s: pkt %p tgt %u done",
	    __func__, (void *)pkt, xp->target_num);

	/*
	 * Dump the raw outbound IOMB when built with DEBUG.
	 */
#ifdef DEBUG
	if (msg) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
	}
#endif
	if (!msg) {
		goto out;
	}

	/*
	 * If the status isn't okay or we got a FIS response of some kind,
	 * step to the side and parse the (possible) error.
	 */
	if ((sts != PMCOUT_STATUS_OK) || (LE_32(msg[3]) != 0)) {
		if (sts == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) {
			/*
			 * Device state is non-operational: attempt a link
			 * reset as error recovery.  Lock order requires
			 * dropping the pwrk lock before taking the PHY lock.
			 */
			mutex_exit(&pwrk->lock);
			pmcs_lock_phy(pptr);
			mutex_enter(&xp->statlock);
			if ((xp->resetting == 0) && (xp->reset_success != 0) &&
			    (xp->reset_wait == 0)) {
				mutex_exit(&xp->statlock);
				if (pmcs_reset_phy(pwp, pptr,
				    PMCS_PHYOP_LINK_RESET) != 0) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: PHY (%s) Local Control/Link "
					    "Reset FAILED as part of error "
					    "recovery", __func__, pptr->path);
				}
				mutex_enter(&xp->statlock);
			}
			mutex_exit(&xp->statlock);
			pmcs_unlock_phy(pptr);
			mutex_enter(&pwrk->lock);
		}
		pmcs_ioerror(pwp, SATA, pwrk, msg, sts);
	} else {
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
		    pwrk->phy->path);
		pkt->pkt_state |= STATE_XFERRED_DATA;
		pkt->pkt_resid = 0;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
	    "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
	    __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
	    pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);

	if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
		    __func__, (void *)pkt, pptr->path, (void *)pwrk);
		aborted = B_TRUE;
	}

out:
	pmcs_dma_unload(pwp, sp);
	mutex_enter(&xp->statlock);
	/* Release the SATL tag claimed in pmcs_SATA_run() */
	xp->tagmap &= ~(1 << sp->cmd_satltag);

	/*
	 * If the device no longer has a PHY pointer, clear the PHY pointer
	 * from the work structure before we free it. Otherwise, pmcs_pwork
	 * may decrement the ref_count on a PHY that's been freed.
	 */
	if (xp->phy == NULL) {
		pwrk->phy = NULL;
	}

	/*
	 * We may arrive here due to a command timing out, which in turn
	 * could be addressed in a different context.  So, free the work
	 * back, but only after confirming it's not already been freed
	 * elsewhere.
	 */
	if (pwrk->htag != PMCS_TAG_FREE) {
		pmcs_pwork(pwp, pwrk);
	}

	/*
	 * If the device is gone, complete here (unless the work was
	 * already marked dead, in which case the command has already
	 * been queued for completion elsewhere).
	 */
	if (xp->dev_gone) {
		mutex_exit(&xp->statlock);
		if (!dead) {
			mutex_enter(&xp->aqlock);
			STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
			mutex_exit(&xp->aqlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq",
			    __func__, (void *)sp, sp->cmd_tag);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
			    "%s: Completing command for dead target 0x%p",
			    __func__, (void *)xp);
		}
		return;
	}

	ASSERT(xp->actv_cnt > 0);
	if (--(xp->actv_cnt) == 0) {
		if (xp->draining) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp,
			    "%s: waking up drain waiters", __func__);
			cv_signal(&pwp->drain_cv);
		} else if (xp->special_needed) {
			/* Queue drained: let pending special commands run */
			SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
		}
	}
	mutex_exit(&xp->statlock);

	/*
	 * If the status is other than OK, determine if it's something that
	 * is worth re-attempting enumeration.  If so, mark the PHY.
	 */
	if (sts != PMCOUT_STATUS_OK) {
		pmcs_status_disposition(pptr, sts);
	}

	if (dead == 0) {
#ifdef	DEBUG
		/*
		 * Sanity check (DEBUG only): the command must still be on
		 * the active queue before we remove it.
		 */
		pmcs_cmd_t *wp;
		mutex_enter(&xp->aqlock);
		STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
			if (wp == sp) {
				break;
			}
		}
		ASSERT(wp != NULL);
#else
		mutex_enter(&xp->aqlock);
#endif
		STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
		if (aborted) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
			    "%s: Aborted cmd for tgt 0x%p, signaling waiters",
			    __func__, (void *)xp);
			cv_signal(&xp->abort_cv);
		}
		mutex_exit(&xp->aqlock);
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
	}
}

/*
 * Decode the transfer length (*xfr, in blocks) and LBA (*lba) from a
 * SCSI read/write CDB.  lbamax is the device capacity; returns 0 on
 * success or an ASC value (0x24 invalid field in CDB, 0x21 LBA out of
 * range) if the CDB is considered illegal.
 */
static uint8_t
pmcs_SATA_rwparm(uint8_t *cdb, uint32_t *xfr, uint64_t *lba, uint64_t lbamax)
{
	uint8_t asc = 0;
	switch (cdb[0]) {
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		/* 16-byte CDB: 64-bit LBA, 32-bit transfer length */
		*xfr =
		    (((uint32_t)cdb[10]) << 24) |
		    (((uint32_t)cdb[11]) << 16) |
		    (((uint32_t)cdb[12]) << 8) |
		    ((uint32_t)cdb[13]);
		*lba =
		    (((uint64_t)cdb[2]) << 56) |
		    (((uint64_t)cdb[3]) << 48) |
		    (((uint64_t)cdb[4]) << 40) |
		    (((uint64_t)cdb[5]) << 32) |
		    (((uint64_t)cdb[6]) << 24) |
		    (((uint64_t)cdb[7]) << 16) |
		    (((uint64_t)cdb[8]) << 8) |
		    ((uint64_t)cdb[9]);
		/* Check for illegal bits */
		if (cdb[15]) {
			asc = 0x24;	/* invalid field in cdb */
		}
		break;
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
		/* 12-byte CDB: 32-bit LBA, 24-bit transfer length */
		*xfr =
		    (((uint32_t)cdb[6]) << 16) |
		    (((uint32_t)cdb[7]) << 8) |
		    ((uint32_t)cdb[8]);
		*lba =
		    (((uint32_t)cdb[2]) << 24) |
		    (((uint32_t)cdb[3]) << 16) |
		    (((uint32_t)cdb[4]) << 8) |
		    ((uint32_t)cdb[5]);
		/* Check for illegal bits */
		if (cdb[11]) {
			asc = 0x24;	/* invalid field in cdb */
		}
		break;
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
		/* 10-byte CDB: 32-bit LBA, 16-bit transfer length */
		*xfr =
		    (((uint32_t)cdb[7]) << 8) | ((uint32_t)cdb[8]);
		*lba =
		    (((uint32_t)cdb[2]) << 24) |
		    (((uint32_t)cdb[3]) << 16) |
		    (((uint32_t)cdb[4]) << 8) |
		    ((uint32_t)cdb[5]);
		/* Check for illegal bits */
		if (cdb[9]) {
			asc = 0x24;	/* invalid field in cdb */
		}
		break;
	case SCMD_READ:
	case SCMD_WRITE:
		/* 6-byte CDB: a transfer length of 0 means 256 blocks */
		*xfr = cdb[4];
		if (*xfr == 0) {
			*xfr = 256;
		}
		*lba =
		    (((uint32_t)cdb[1] & 0x1f) << 16) |
		    (((uint32_t)cdb[2]) << 8) |
		    ((uint32_t)cdb[3]);
		/* Check for illegal bits */
		if (cdb[5]) {
			asc = 0x24;	/* invalid field in cdb */
		}
		break;
	}

	/* Reject transfers that run past the end of the device */
	if (asc == 0) {
		if ((*lba + *xfr) > lbamax) {
			asc = 0x21;	/* logical block out of range */
		}
	}
	return (asc);
}

/*
 * Translate a PMC firmware completion status into SCSA packet state
 * (pkt_reason/pkt_state/pkt_statistics and, where appropriate,
 * synthesized SCSI status/sense data) for the command attached to pwrk.
 * t indicates the transport type (SAS or SATA); w points to the
 * outbound IOMB and must not be NULL.
 *
 * Called with pwrk lock held.
 */
static void
pmcs_ioerror(pmcs_hw_t *pwp, pmcs_dtype_t t, pmcwork_t *pwrk, uint32_t *w,
    uint32_t status)
{
	/* Canned sense: UNIT ATTENTION, ASC 0x28 (power on/reset) */
	static uint8_t por[] = {
		0xf0, 0x0, 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x28
	};
	/* Canned sense: ABORTED COMMAND, ASC 0x47 (parity error) */
	static uint8_t parity[] = {
		0xf0, 0x0, 0xb, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x47, 5
	};
	const char *msg;
	char buf[20];
	pmcs_cmd_t *sp = pwrk->arg;
	pmcs_phy_t *phyp = pwrk->phy;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	uint32_t resid;

	ASSERT(w != NULL);
	resid = LE_32(w[3]);

	msg = pmcs_status_str(status);
	if (msg == NULL) {
		/* Unknown status code: fall back to a numeric message */
		(void) snprintf(buf, sizeof (buf), "Error 0x%x", status);
		msg = buf;
	}

	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, NULL,
		    "%s: device %s tag 0x%x status %s @ %llu", __func__,
		    phyp->path, pwrk->htag, msg,
		    (unsigned long long)gethrtime());
	}

	pkt->pkt_reason = CMD_CMPLT;	/* default reason */

	switch (status) {
	case PMCOUT_STATUS_OK:
		if (t == SATA) {
			int i;
			fis_t fis;
			/*
			 * Status OK but we were routed here anyway, which
			 * for SATA means the device returned a FIS; decode
			 * it from the IOMB payload.
			 */
			for (i = 0; i < sizeof (fis) / sizeof (fis[0]); i++) {
				fis[i] = LE_32(w[4+i]);
			}
			if ((fis[0] & 0xff) != FIS_REG_D2H) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "unexpected fis code 0x%x", fis[0] & 0xff);
			} else {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "FIS ERROR");
				pmcs_fis_dump(pwp, fis);
			}
			pkt->pkt_reason = CMD_TRAN_ERR;
			break;
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_ABORTED:
		/*
		 * Command successfully aborted.  Pick pkt_reason based on
		 * why the abort happened: dead PHY, SSP event, timeout, or
		 * an explicit abort request.
		 */
		if (phyp->dead) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->ssp_event != 0) {
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
			pkt->pkt_reason = CMD_TIMEOUT;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		} else {
			pkt->pkt_reason = CMD_ABORTED;
			pkt->pkt_statistics |= STAT_ABORTED;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		}

		/*
		 * PMCS_WORK_STATE_TIMED_OUT doesn't need to be preserved past
		 * this point, so go ahead and mark it as aborted.
		 */
		pwrk->state = PMCS_WORK_STATE_ABORTED;
		break;

	case PMCOUT_STATUS_UNDERFLOW:
		/*
		 * This will only get called for SATA
		 */
		pkt->pkt_resid = resid;
		if (pkt->pkt_dma_len < pkt->pkt_resid) {
			(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, resid);
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_NO_DEVICE:
	case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
		pkt->pkt_reason = CMD_DEV_GONE;
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
		/*
		 * Need to do rediscovery.  We probably have
		 * the wrong device (disk swap), so kill
		 * this one.
		 */
		/* FALLTHROUGH */
	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
		/*
		 * Need to do rediscovery.  Lock order requires dropping
		 * the pwrk lock before taking the PHY lock.
		 */
		if (!phyp->dead) {
			mutex_exit(&pwrk->lock);
			pmcs_lock_phy(pwrk->phy);
			pmcs_kill_changed(pwp, pwrk->phy, 0);
			pmcs_unlock_phy(pwrk->phy);
			mutex_enter(&pwrk->lock);
			pkt->pkt_reason = CMD_INCOMPLETE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else {
			pkt->pkt_reason = CMD_DEV_GONE;
		}
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
		/* cmd is pending on the target */
	case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
	case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
		/* transitory - commands sent while in NCQ failure mode */
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
		/* NCQ failure */
	case PMCOUT_STATUS_IO_PORT_IN_RESET:
	case PMCOUT_STATUS_XFER_ERR_BREAK:
	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
		/* All of the above: retryable transport conditions */
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state = STATE_GOT_BUS;
		break;

	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "STATUS_BUSY for htag 0x%08x", sp->cmd_tag);
		pmcs_latch_status(pwp, sp, STATUS_BUSY, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		/* synthesize a RESERVATION CONFLICT */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "%s: Potential affiliation active on 0x%" PRIx64, __func__,
		    pmcs_barray2wwn(phyp->sas_address));
		pmcs_latch_status(pwp, sp, STATUS_RESERVATION_CONFLICT, NULL,
		    0, phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
		/* synthesize a power-on/reset */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, por, sizeof (por),
		    phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
	case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
	case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
		/* synthesize a PARITY ERROR */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, parity,
		    sizeof (parity), phyp->path);
		break;

	case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
	case PMCOUT_STATUS_IO_NOT_VALID:
	case PMCOUT_STATUS_PROG_ERROR:
	case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
	case PMCOUT_STATUS_XFER_ERROR_SATA:	/* non-NCQ failure */
	default:
		pkt->pkt_reason = CMD_TRAN_ERR;
		break;
	}
}

/*
 * Latch up SCSI status: record the SCSI status byte (and, for CHECK
 * CONDITION with supplied sense data, a synthesized auto-request-sense
 * result) in the packet for command sp.  snsp/snslen may be NULL/0;
 * path is used only for debug logging.
 */

void
pmcs_latch_status(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint8_t status,
    uint8_t *snsp, size_t snslen, char *path)
{
	static const char c1[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x (%02x %02x %02x) "
	    "HTAG 0x%x @ %llu";
	static const char c2[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x HTAG 0x%x @ %llu";

	CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_GOT_STATUS;
	CMD2PKT(sp)->pkt_scbp[0] = status;

	if (status == STATUS_CHECK && snsp &&
	    (size_t)SCSA_STSLEN(sp) >= sizeof (struct scsi_arq_status)) {
		/*
		 * There is room for a full ARQ status: copy the sense
		 * data in, capped at the extended-sense structure size.
		 */
		struct scsi_arq_status *aqp =
		    (void *) CMD2PKT(sp)->pkt_scbp;
		size_t amt = sizeof (struct scsi_extended_sense);
		uint8_t key = scsi_sense_key(snsp);
		uint8_t asc = scsi_sense_asc(snsp);
		uint8_t ascq = scsi_sense_ascq(snsp);
		if (amt > snslen) {
			amt = snslen;
		}
		pmcs_prt(pwp,
		    PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c1, path,
		    status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff, key, asc, ascq,
		    sp->cmd_tag, (unsigned long long)gethrtime());
		CMD2PKT(sp)->pkt_state |= STATE_ARQ_DONE;
		/* Synthesize a successful internal REQUEST SENSE result */
		(*(uint8_t *)&aqp->sts_rqpkt_status) = STATUS_GOOD;
		aqp->sts_rqpkt_statistics = 0;
		aqp->sts_rqpkt_reason = CMD_CMPLT;
		aqp->sts_rqpkt_state = STATE_GOT_BUS |
		    STATE_GOT_TARGET | STATE_SENT_CMD |
		    STATE_XFERRED_DATA | STATE_GOT_STATUS;
		(void) memcpy(&aqp->sts_sensedata, snsp, amt);
		if (aqp->sts_sensedata.es_class != CLASS_EXTENDED_SENSE) {
			/* Not extended sense: report the ARQ as failed */
			aqp->sts_rqpkt_reason = CMD_TRAN_ERR;
			aqp->sts_rqpkt_state = 0;
			aqp->sts_rqpkt_resid =
			    sizeof (struct scsi_extended_sense);
		} else {
			aqp->sts_rqpkt_resid =
			    sizeof (struct scsi_extended_sense) - amt;
		}
	} else if (status) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c2,
		    path, status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff,
		    sp->cmd_tag, (unsigned long long)gethrtime());
	}

	CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
}

/*
 * Calculate and set packet residual and return the amount
 * left over after applying various filters.
 *
 * pkt_resid is set to cdbamt (the CDB-requested byte count); the
 * returned value is amt clamped to both cdbamt and the packet's DMA
 * length, i.e. the number of bytes that can actually move.
 */
size_t
pmcs_set_resid(struct scsi_pkt *pkt, size_t amt, uint32_t cdbamt)
{
	pkt->pkt_resid = cdbamt;
	if (amt > pkt->pkt_resid) {
		amt = pkt->pkt_resid;
	}
	if (amt > pkt->pkt_dma_len) {
		amt = pkt->pkt_dma_len;
	}
	return (amt);
}

/*
 * Return the existing target softstate (unlocked) if there is one.  If so,
 * the PHY is locked and that lock must be freed by the caller after the
 * target/PHY linkage is established.  If there isn't one, and alloc_tgt is
 * TRUE, then allocate one.
 */
pmcs_xscsi_t *
pmcs_get_target(pmcs_iport_t *iport, char *tgt_port, boolean_t alloc_tgt)
{
	pmcs_hw_t *pwp = iport->pwp;
	pmcs_phy_t *phyp;
	pmcs_xscsi_t *tgt;
	uint64_t wwn;
	char unit_address[PMCS_MAX_UA_SIZE];
	int ua_form = 1;

	/*
	 * Find the PHY for this target.  On success the PHY is returned
	 * locked; every failure path below must unlock it.
	 */
	phyp = pmcs_find_phy_by_sas_address(pwp, iport, NULL, tgt_port);
	if (phyp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
		    "%s: No PHY for target @ %s", __func__, tgt_port);
		return (NULL);
	}

	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);

	if (tgt) {
		mutex_enter(&tgt->statlock);
		/*
		 * There's already a target.  Check its PHY pointer to see
		 * if we need to clear the old linkages
		 */
		if (tgt->phy && (tgt->phy != phyp)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
			    "%s: Target PHY updated from %p to %p", __func__,
			    (void *)tgt->phy, (void *)phyp);
			/* Move the ref count from the old PHY to the new */
			if (!IS_ROOT_PHY(tgt->phy)) {
				pmcs_dec_phy_ref_count(tgt->phy);
				pmcs_inc_phy_ref_count(phyp);
			}
			tgt->phy->target = NULL;
		}

		/*
		 * If this target has no PHY pointer and alloc_tgt is FALSE,
		 * that implies we expect the target to already exist.  This
		 * implies that there has already been a tran_tgt_init on at
		 * least one LU.
		 */
		if ((tgt->phy == NULL) && !alloc_tgt) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, tgt,
			    "%s: Establish linkage from new PHY to old target @"
			    "%s", __func__, tgt->unit_address);
			/* One PHY reference per existing LU reference */
			for (int idx = 0; idx < tgt->ref_count; idx++) {
				pmcs_inc_phy_ref_count(phyp);
			}
		}

		tgt->phy = phyp;
		phyp->target = tgt;

		mutex_exit(&tgt->statlock);
		return (tgt);
	}

	/*
	 * Make sure the PHY we found is on the correct iport
	 */
	if (phyp->iport != iport) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
		    "%s: No target at %s on this iport", __func__, tgt_port);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	/*
	 * If this was just a lookup (i.e. alloc_tgt is false), return now.
	 */
	if (alloc_tgt == B_FALSE) {
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	/*
	 * Allocate the new softstate, keyed by the WWN-based unit address.
	 */
	wwn = pmcs_barray2wwn(phyp->sas_address);
	(void) scsi_wwn_to_wwnstr(wwn, ua_form, unit_address);

	if (ddi_soft_state_bystr_zalloc(iport->tgt_sstate, unit_address) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: Couldn't alloc softstate for device at %s",
		    __func__, unit_address);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, unit_address);
	ASSERT(tgt != NULL);
	/* Initialize the freshly zero-allocated target structure */
	STAILQ_INIT(&tgt->wq);
	STAILQ_INIT(&tgt->aq);
	STAILQ_INIT(&tgt->sq);
	mutex_init(&tgt->statlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	mutex_init(&tgt->wqlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	mutex_init(&tgt->aqlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&tgt->reset_cv, NULL, CV_DRIVER, NULL);
	cv_init(&tgt->abort_cv, NULL, CV_DRIVER, NULL);
	list_create(&tgt->lun_list, sizeof (pmcs_lun_t),
	    offsetof(pmcs_lun_t, lun_list_next));
	tgt->qdepth = 1;
	tgt->target_num = PMCS_INVALID_TARGET_NUM;
	bcopy(unit_address, tgt->unit_address, PMCS_MAX_UA_SIZE);
	tgt->pwp = pwp;
	tgt->ua = strdup(iport->ua);
	tgt->phy = phyp;
	ASSERT((phyp->target == NULL) || (phyp->target == tgt));
	if (phyp->target == NULL) {
		phyp->target = tgt;
	}

	/*
	 * Don't allocate LUN softstate for SMP targets
	 * (returns with the PHY still locked, per the contract above).
	 */
	if (phyp->dtype == EXPANDER) {
		return (tgt);
	}

	if (ddi_soft_state_bystr_init(&tgt->lun_sstate,
	    sizeof (pmcs_lun_t), PMCS_LUN_SSTATE_SZ) != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: LUN soft_state_bystr_init failed", __func__);
		/*
		 * NOTE(review): the softstate was allocated under
		 * unit_address but is freed here under tgt_port --
		 * presumably the two strings are identical for this
		 * lookup; confirm, otherwise this leaks the new entry.
		 */
		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	return (tgt);
}