/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * SCSI (SCSA) midlayer interface for PMC driver.
 */

#include <sys/scsi/adapters/pmcs/pmcs.h>

extern scsi_lun_t scsi_lun64_to_lun(scsi_lun64_t lun64);

/* SCSA (scsi_hba_tran_t) entry points */
static int pmcs_scsa_tran_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void pmcs_scsa_tran_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int pmcs_scsa_start(struct scsi_address *, struct scsi_pkt *);
static int pmcs_scsa_abort(struct scsi_address *, struct scsi_pkt *);
static int pmcs_scsa_reset(struct scsi_address *, int);
static int pmcs_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int pmcs_scsa_getcap(struct scsi_address *, char *, int);
static int pmcs_scsa_setcap(struct scsi_address *, char *, int, int);
static int pmcs_scsa_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t);
static void pmcs_scsa_teardown_pkt(struct scsi_pkt *);

/* SMP (smp_hba_tran_t) entry points */
static int pmcs_smp_init(dev_info_t *, dev_info_t *, smp_hba_tran_t *,
    smp_device_t *);
static void pmcs_smp_free(dev_info_t *, dev_info_t *, smp_hba_tran_t *,
    smp_device_t *);
static int pmcs_smp_start(struct smp_pkt *);

static int pmcs_scsi_quiesce(dev_info_t *);
static int pmcs_scsi_unquiesce(dev_info_t *);

/* Internal helpers */
static int pmcs_cap(struct scsi_address *, char *, int, int, int);
static pmcs_xscsi_t *
    pmcs_addr2xp(struct scsi_address *, uint64_t *, pmcs_cmd_t *);
static int pmcs_SAS_run(pmcs_cmd_t *, pmcwork_t *);
static void pmcs_SAS_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);

static int pmcs_SATA_run(pmcs_cmd_t *, pmcwork_t *);
static void pmcs_SATA_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);
static uint8_t pmcs_SATA_rwparm(uint8_t *, uint32_t *, uint64_t *, uint64_t);

static void pmcs_ioerror(pmcs_hw_t *, pmcs_dtype_t pmcs_dtype,
    pmcwork_t *, uint32_t *, uint32_t);


/*
 * pmcs_scsa_init
 *
 * Allocate and set up both the SCSA transport (scsi_hba_tran_t) and the
 * SMP transport (smp_hba_tran_t) for this HBA instance and attach them
 * to the framework.  The DMA attributes are copied from 'ap' and then
 * adjusted (SGL length, relaxed ordering, FLAGERR) for this driver.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE with everything allocated here
 * freed again.
 */
int
pmcs_scsa_init(pmcs_hw_t *pwp, const ddi_dma_attr_t *ap)
{
	scsi_hba_tran_t *tran;
	ddi_dma_attr_t pmcs_scsa_dattr;
	int flags;

	(void) memcpy(&pmcs_scsa_dattr, ap, sizeof (ddi_dma_attr_t));
	/* Max SGL length supported by chained SGL chunks */
	pmcs_scsa_dattr.dma_attr_sgllen =
	    ((PMCS_SGL_NCHUNKS - 1) * (PMCS_MAX_CHUNKS - 1)) + PMCS_SGL_NCHUNKS;
	pmcs_scsa_dattr.dma_attr_flags = DDI_DMA_RELAXED_ORDERING;
	pmcs_scsa_dattr.dma_attr_flags |= DDI_DMA_FLAGERR;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(pwp->dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "scsi_hba_tran_alloc failed");
		return (DDI_FAILURE);
	}

	tran->tran_hba_private = pwp;
	tran->tran_tgt_init = pmcs_scsa_tran_tgt_init;
	tran->tran_tgt_free = pmcs_scsa_tran_tgt_free;
	tran->tran_start = pmcs_scsa_start;
	tran->tran_abort = pmcs_scsa_abort;
	tran->tran_reset = pmcs_scsa_reset;
	tran->tran_reset_notify = pmcs_scsi_reset_notify;
	tran->tran_getcap = pmcs_scsa_getcap;
	tran->tran_setcap = pmcs_scsa_setcap;
	tran->tran_setup_pkt = pmcs_scsa_setup_pkt;
	tran->tran_teardown_pkt = pmcs_scsa_teardown_pkt;
	tran->tran_quiesce = pmcs_scsi_quiesce;
	tran->tran_unquiesce = pmcs_scsi_unquiesce;
	tran->tran_interconnect_type = INTERCONNECT_SAS;
	tran->tran_hba_len = sizeof (pmcs_cmd_t);

	/*
	 * Attach this instance of the hba
	 */

	flags = SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CDB | SCSI_HBA_ADDR_COMPLEX |
	    SCSI_HBA_TRAN_PHCI | SCSI_HBA_HBA;

	if (scsi_hba_attach_setup(pwp->dip, &pmcs_scsa_dattr, tran, flags)) {
		scsi_hba_tran_free(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "scsi_hba_attach failed");
		return (DDI_FAILURE);
	}
	pwp->tran = tran;

	/*
	 * Attach the SMP part of this hba
	 */
	pwp->smp_tran = smp_hba_tran_alloc(pwp->dip);
	ASSERT(pwp->smp_tran != NULL);
	pwp->smp_tran->smp_tran_hba_private = pwp;
	pwp->smp_tran->smp_tran_init = pmcs_smp_init;
	pwp->smp_tran->smp_tran_free = pmcs_smp_free;
	pwp->smp_tran->smp_tran_start = pmcs_smp_start;

	if (smp_hba_attach_setup(pwp->dip, pwp->smp_tran) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "smp_hba_attach failed");
		smp_hba_tran_free(pwp->smp_tran);
		pwp->smp_tran = NULL;
		scsi_hba_tran_free(tran);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * SCSA entry points
 */

/*
 * tran_tgt_init(9E) entry point.  Called by the framework when a new
 * scsi_device (target/LUN) is being attached under one of our iports.
 * Allocates per-LUN soft state keyed by unit-address, links it to the
 * per-target softstate (allocating/registering the target on first use)
 * and assigns the device to the hardware.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE (with all partial state undone).
 */
static int
pmcs_scsa_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	pmcs_hw_t *pwp = NULL;
	int rval;
	char *variant_prop = "sata";
	char *tgt_port = NULL, *ua = NULL;
	pmcs_xscsi_t *tgt = NULL;
	pmcs_iport_t *iport;
	pmcs_lun_t *lun = NULL;
	pmcs_phy_t *phyp = NULL;
	uint64_t lun_num;
	boolean_t got_scratch = B_FALSE;

	/*
	 * First, make sure we're an iport and get the pointer to the HBA
	 * node's softstate
	 */
	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pmcs_prt(TRAN2PMC(tran), PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		goto tgt_init_fail;
	}

	pwp = ITRAN2PMC(tran);
	iport = ITRAN2IPORT(tran);

	/*
	 * Get the unit-address
	 */
	ua = scsi_device_unit_address(sd);
	if (ua == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Couldn't get UA", __func__);
		/* NULL pwp suppresses the message in the common fail path */
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "got ua '%s'", ua);

	/*
	 * Get the target address
	 */
	rval = scsi_device_prop_lookup_string(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port);
	if (rval != DDI_PROP_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "Couldn't get target UA");
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "got tgt_port '%s'", tgt_port);

	/*
	 * Validate that this tran_tgt_init is for an active iport.
	 */
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Got tran_tgt_init on inactive iport for '%s'",
		    __func__, tgt_port);
		pwp = NULL;
		goto tgt_init_fail;
	}

	/*
	 * Since we're going to wait for scratch, be sure to acquire it while
	 * we're not holding any other locks
	 */
	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	got_scratch = B_TRUE;

	mutex_enter(&pwp->lock);

	/*
	 * See if there's already a target softstate.  If not, allocate one.
	 */
	tgt = pmcs_get_target(iport, tgt_port, B_TRUE);

	if (tgt == NULL) {
		goto tgt_init_fail;
	}

	/* On success, pmcs_get_target returns with the PHY lock held */
	phyp = tgt->phy;
	if (!IS_ROOT_PHY(phyp)) {
		pmcs_inc_phy_ref_count(phyp);
	}
	ASSERT(mutex_owned(&phyp->phy_lock));

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt, "@%s tgt = 0x%p, dip = 0x%p",
	    ua, (void *)tgt, (void *)tgt_dip);

	/* Now get the lun */
	lun_num = scsi_device_prop_get_int64(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_LUN64, SCSI_LUN64_ILLEGAL);
	if (lun_num == SCSI_LUN64_ILLEGAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "No LUN for tgt %p", (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, "%s: @%s tgt 0x%p phy "
	    "0x%p (%s)", __func__, ua, (void *)tgt, (void *)phyp, phyp->path);

	mutex_enter(&tgt->statlock);
	tgt->dtype = phyp->dtype;
	if (tgt->dtype != SAS && tgt->dtype != SATA) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "PHY 0x%p went away?", (void *)phyp);
		goto tgt_init_fail;
	}

	/* We don't support SATA devices at LUN > 0. */
	if ((tgt->dtype == SATA) && (lun_num > 0)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: No support for SATA devices at LUN > 0 "
		    "(target = 0x%p)", __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	/*
	 * Allocate LU soft state. We use ddi_soft_state_bystr_zalloc instead
	 * of kmem_alloc because ddi_soft_state_bystr_zalloc allows us to
	 * verify that the framework never tries to initialize two scsi_device
	 * structures with the same unit-address at the same time.
	 */
	if (ddi_soft_state_bystr_zalloc(tgt->lun_sstate, ua) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt,
		    "Couldn't allocate LU soft state");
		goto tgt_init_fail;
	}

	lun = ddi_soft_state_bystr_get(tgt->lun_sstate, ua);
	if (lun == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt,
		    "Couldn't get LU soft state");
		goto tgt_init_fail;
	}
	scsi_device_hba_private_set(sd, lun);
	lun->lun_num = lun_num;

	/* convert the scsi_lun64_t value to SCSI standard form */
	lun->scsi_lun = scsi_lun64_to_lun(lun_num);

	ASSERT(strlen(ua) < (PMCS_MAX_UA_SIZE - 1));
	bcopy(ua, lun->unit_address, strnlen(ua, PMCS_MAX_UA_SIZE - 1));

	lun->target = tgt;

	/*
	 * If this is the first tran_tgt_init, add this target to our list
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		/* Find the first free slot in the target array */
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
			    "Target list full.");
			goto tgt_init_fail;
		}
	}

	tgt->dip = sd->sd_dev;
	lun->sd = sd;
	list_insert_tail(&tgt->lun_list, lun);

	if (!pmcs_assign_device(pwp, tgt)) {
		pmcs_release_scratch(pwp);
		pwp->targets[tgt->target_num] = NULL;
		tgt->target_num = PMCS_INVALID_TARGET_NUM;
		tgt->phy = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_release_scratch(pwp);
	tgt->ref_count++;

	(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET, (uint32_t)(tgt->target_num));

	/* SM-HBA */
	if (tgt->dtype == SATA) {
		/* TCR in PSARC/1997/281 opinion */
		(void) scsi_device_prop_update_string(sd,
		    SCSI_DEVICE_PROP_PATH, "variant", variant_prop);
	}

	tgt->phy_addressable = PMCS_PHY_ADDRESSABLE(phyp);

	if (tgt->phy_addressable) {
		(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
		    SCSI_ADDR_PROP_SATA_PHY, phyp->phynum);
	}

	/* SM-HBA */
	(void) pmcs_smhba_set_scsi_device_props(pwp, phyp, sd);
	/*
	 * Make sure attached port and target port pm props are updated
	 * By passing in 0s, we're not actually updating any values, but
	 * the properties should now get updated on the node.
	 */

	mutex_exit(&tgt->statlock);
	pmcs_update_phy_pm_props(phyp, 0, 0, B_TRUE);
	pmcs_unlock_phy(phyp);
	mutex_exit(&pwp->lock);
	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	return (DDI_SUCCESS);

	/*
	 * Failure path: undo, in reverse order, whatever was set up above.
	 * The checks on got_scratch/lun/phyp/tgt/pwp/tgt_port mirror how
	 * far we got before jumping here.
	 */
tgt_init_fail:
	scsi_device_hba_private_set(sd, NULL);
	if (got_scratch) {
		pmcs_release_scratch(pwp);
	}
	if (lun) {
		list_remove(&tgt->lun_list, lun);
		ddi_soft_state_bystr_free(tgt->lun_sstate, ua);
	}
	if (phyp) {
		mutex_exit(&tgt->statlock);
		pmcs_unlock_phy(phyp);
		/*
		 * phyp's ref count was incremented in pmcs_new_tport.
		 * We're failing configuration, we now need to decrement it.
		 */
		if (!IS_ROOT_PHY(phyp)) {
			pmcs_dec_phy_ref_count(phyp);
		}
		phyp->target = NULL;
	}
	if (tgt && tgt->ref_count == 0) {
		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
	}
	if (pwp) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: failed for @%s tgt 0x%p phy 0x%p", __func__, ua,
		    (void *)tgt, (void *)phyp);
	}
	if (tgt_port) {
		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	}
	return (DDI_FAILURE);
}

/*
 * tran_tgt_free(9E) entry point.  Release the per-LUN soft state for
 * this scsi_device; when the last LUN reference on the target goes
 * away, the target softstate itself is torn down.
 */
static void
pmcs_scsa_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	_NOTE(ARGUNUSED(hba_dip, tgt_dip));
	pmcs_hw_t *pwp;
	pmcs_lun_t *lun;
	pmcs_xscsi_t *target;
	char *unit_address;
	pmcs_phy_t *phyp;

	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pwp = TRAN2PMC(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		return;
	}

	lun = (pmcs_lun_t *)scsi_device_hba_private_get(sd);

	ASSERT((lun != NULL) && (lun->target != NULL));
	ASSERT(lun->target->ref_count > 0);

	target = lun->target;
	unit_address = lun->unit_address;
	list_remove(&target->lun_list, lun);

	pwp = ITRAN2PMC(tran);
	/* Lock order: pwp->lock, then phy_lock, then statlock */
	mutex_enter(&pwp->lock);
	phyp = target->phy;
	if (phyp) {
		mutex_enter(&phyp->phy_lock);
	}
	mutex_enter(&target->statlock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target,
	    "%s: for @%s tgt 0x%p phy 0x%p", __func__, unit_address,
	    (void *)target, (void *)phyp);
	ddi_soft_state_bystr_free(lun->target->lun_sstate, unit_address);

	if (target->recover_wait) {
		mutex_exit(&target->statlock);
		if (phyp) {
			mutex_exit(&phyp->phy_lock);
		}
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target, "%s: "
		    "Target 0x%p in device state recovery, fail tran_tgt_free",
		    __func__, (void *)target);
		return;
	}

	/*
	 * If this target still has a PHY pointer and that PHY's target pointer
	 * has been cleared, then that PHY has been reaped. In that case, there
	 * would be no need to decrement the reference count
	 */
	if (phyp && !IS_ROOT_PHY(phyp) && phyp->target) {
		pmcs_dec_phy_ref_count(phyp);
	}

	if (--target->ref_count == 0) {
		/*
		 * Remove this target from our list.  The target soft
		 * state will remain, and the device will remain registered
		 * with the hardware unless/until we're told the device
		 * physically went away.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target,
		    "%s: Free target 0x%p (vtgt %d)", __func__, (void *)target,
		    target->target_num);
		pwp->targets[target->target_num] = NULL;
		target->target_num = PMCS_INVALID_TARGET_NUM;
		/*
		 * If the target still has a PHY pointer, break the linkage
		 */
		if (phyp) {
			phyp->target = NULL;
		}
		target->phy = NULL;
		/* pmcs_destroy_target drops statlock */
		pmcs_destroy_target(target);
	} else {
		mutex_exit(&target->statlock);
	}

	if (phyp) {
		mutex_exit(&phyp->phy_lock);
	}
	mutex_exit(&pwp->lock);
}

/*
 * tran_start(9E) entry point.  Queue the command on the target's wait
 * queue and kick the per-device queue runner; completion is always
 * asynchronous, so FLAG_NOINTR (polled) packets are rejected with
 * TRAN_BADPKT.  Commands for dead/missing targets are completed
 * immediately with CMD_DEV_GONE via the completion queue.
 */
static int
pmcs_scsa_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_cmd_t *sp = PKT2CMD(pkt);
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_xscsi_t *xp;
	boolean_t blocked;
	uint32_t hba_state;

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: pkt %p sd %p cdb0=0x%02x dl=%lu", __func__, (void *)pkt,
	    (void *)scsi_address_device(&pkt->pkt_address),
	    pkt->pkt_cdbp[0] & 0xff, pkt->pkt_dma_len);

	if (pkt->pkt_flags & FLAG_NOINTR) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
		    "%s: nointr pkt", __func__);
		return (TRAN_BADPKT);
	}

	sp->cmd_tag = 0;
	pkt->pkt_state = pkt->pkt_statistics = 0;
	pkt->pkt_reason = CMD_INCOMPLETE;

	/* Snapshot HBA state under the lock */
	mutex_enter(&pwp->lock);
	hba_state = pwp->state;
	blocked = pwp->blocked;
	mutex_exit(&pwp->lock);

	if (hba_state != STATE_RUNNING) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (TRAN_FATAL_ERROR);
	}

	xp = pmcs_addr2xp(ap, NULL, sp);
	if (xp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
		    "%s: dropping due to null target", __func__);
		goto dead_target;
	}
	ASSERT(mutex_owned(&xp->statlock));

	/*
	 * First, check to see if the device is gone.
	 */
	if (xp->dev_gone) {
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, xp,
		    "%s: dropping due to dead target 0x%p",
		    __func__, (void *)xp);
		goto dead_target;
	}

	/*
	 * If we're blocked (quiesced) just return.
	 */
	if (blocked) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba blocked", __func__);
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (TRAN_ACCEPT);
	}

	/*
	 * If we're draining or resetting, queue and return.
	 */
	if (xp->draining || xp->resetting || xp->recover_wait) {
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, xp,
		    "%s: draining/resetting/recovering (cnt %u)",
		    __func__, xp->actv_cnt);
		/*
		 * By the time we get here, draining or
		 * resetting may have come and gone, not
		 * yet noticing that we had put something
		 * on the wait queue, so schedule a worker
		 * to look at this later.
		 */
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (TRAN_ACCEPT);
	}

	xp->actv_pkts++;
	mutex_exit(&xp->statlock);

	/*
	 * Queue this command to the tail of the wait queue.
	 * This keeps us from getting commands out of order.
	 */
	mutex_enter(&xp->wqlock);
	STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
	mutex_exit(&xp->wqlock);

	/*
	 * Now run the queue for this device.
	 */
	(void) pmcs_scsa_wq_run_one(pwp, xp);

	return (TRAN_ACCEPT);

dead_target:
	/* Complete the command immediately as CMD_DEV_GONE */
	pkt->pkt_state = STATE_GOT_BUS;
	pkt->pkt_reason = CMD_DEV_GONE;
	mutex_enter(&pwp->cq_lock);
	STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
	PMCS_CQ_RUN_LOCKED(pwp);
	mutex_exit(&pwp->cq_lock);
	return (TRAN_ACCEPT);
}

/*
 * tran_abort(9E) entry point.  With a NULL pkt, issue an ABORT_ALL for
 * the addressed device; otherwise try to abort the single command,
 * either on the chip (via TMF/NCQ abort) or by pulling it off the
 * target's wait queue.
 *
 * Return code 1 = Success
 */
static int
pmcs_scsa_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_cmd_t *sp = NULL;
	pmcs_xscsi_t *xp = NULL;
	pmcs_phy_t *pptr = NULL;
	pmcs_lun_t *pmcs_lun = (pmcs_lun_t *)
	    scsi_device_hba_private_get(scsi_address_device(ap));
	uint32_t tag;
	uint64_t lun;
	pmcwork_t *pwrk;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	if (pkt == NULL) {
		/* ABORT_ALL case: abort everything for this device's PHY */
		if (pmcs_lun == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: "
			    "No pmcs_lun_t struct to do ABORT_ALL", __func__);
			return (0);
		}
		xp = pmcs_lun->target;
		if (xp != NULL) {
			pptr = xp->phy;
		}
		if (pptr == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: pkt is "
			    "NULL. No tgt/phy to do ABORT_ALL", __func__);
			return (0);
		}
		pmcs_lock_phy(pptr);
		if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
			pptr->abort_pending = 1;
			SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		}
		pmcs_unlock_phy(pptr);
		return (1);
	}

	sp = PKT2CMD(pkt);
	xp = sp->cmd_target;

	if (sp->cmd_lun) {
		lun = sp->cmd_lun->lun_num;
	} else {
		lun = 0;
	}
	if (xp == NULL) {
		return (0);
	}

	/*
	 * See if we have a real work structure associated with this cmd.
	 */
	pwrk = pmcs_tag2wp(pwp, sp->cmd_tag, B_FALSE);
	if (pwrk && pwrk->arg == sp) {
		/* Command is active on the chip; abort it there */
		tag = pwrk->htag;
		pptr = pwrk->phy;
		pwrk->timer = 0;	/* we don't time this here */
		ASSERT(pwrk->state == PMCS_WORK_STATE_ONCHIP);
		mutex_exit(&pwrk->lock);
		pmcs_lock_phy(pptr);
		if (pptr->dtype == SAS) {
			if (pmcs_ssp_tmf(pwp, pptr, SAS_ABORT_TASK, tag, lun,
			    NULL)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		} else {
			/*
			 * XXX: Was the command that was active an
			 * NCQ I/O command?
			 */
			pptr->need_rl_ext = 1;
			if (pmcs_sata_abort_ncq(pwp, pptr)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		}
		pptr->abort_pending = 1;
		pmcs_unlock_phy(pptr);
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		return (1);
	}
	if (pwrk) {
		mutex_exit(&pwrk->lock);
	}
	/*
	 * Okay, those weren't the droids we were looking for.
	 * See if the command is on any of the wait queues.
	 */
	mutex_enter(&xp->wqlock);
	sp = NULL;
	STAILQ_FOREACH(sp, &xp->wq, cmd_next) {
		if (sp == PKT2CMD(pkt)) {
			STAILQ_REMOVE(&xp->wq, sp, pmcs_cmd, cmd_next);
			break;
		}
	}
	mutex_exit(&xp->wqlock);
	if (sp) {
		/* Found it on the wait queue; complete it as aborted */
		pkt->pkt_reason = CMD_ABORTED;
		pkt->pkt_statistics |= STAT_ABORTED;
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
		return (1);
	}
	return (0);
}

/*
 * SCSA reset functions
 */

/*
 * tran_reset(9E) entry point.  RESET_LUN and RESET_TARGET are handled
 * via pmcs_reset_dev; RESET_ALL is not supported (returns 0).
 * Returns 1 on success, 0 on failure.
 */
static int
pmcs_scsa_reset(struct scsi_address *ap, int level)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint64_t lun = (uint64_t)-1, *lp = NULL;
	int rval;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	switch (level) {
	case RESET_ALL:
		rval = 0;
		break;
	case RESET_LUN:
		/*
		 * Point lp at lun so that pmcs_addr2xp
		 * will fill out the 64 bit lun number.
		 */
		lp = &lun;
		/* FALLTHROUGH */
	case RESET_TARGET:
		xp = pmcs_addr2xp(ap, lp, NULL);
		if (xp == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: no xp found for this scsi address", __func__);
			return (0);
		}

		if (xp->dev_gone) {
			mutex_exit(&xp->statlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
			    "%s: Target 0x%p has gone away", __func__,
			    (void *)xp);
			return (0);
		}

		/*
		 * If we're already performing this action, or if device
		 * state recovery is already running, just return failure.
		 */
		if (xp->resetting || xp->recover_wait) {
			mutex_exit(&xp->statlock);
			return (0);
		}
		xp->reset_wait = 0;
		xp->reset_success = 0;
		xp->resetting = 1;
		pptr = xp->phy;
		mutex_exit(&xp->statlock);

		/* Drop statlock across the (potentially slow) reset */
		if (pmcs_reset_dev(pwp, pptr, lun)) {
			rval = 0;
		} else {
			rval = 1;
		}

		mutex_enter(&xp->statlock);
		if (rval == 1) {
			xp->reset_success = 1;
		}
		/* Wake anyone waiting on the reset to complete */
		if (xp->reset_wait) {
			xp->reset_wait = 0;
			cv_signal(&xp->reset_cv);
		}
		xp->resetting = 0;
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		break;
	default:
		rval = 0;
		break;
	}

	return (rval);
}

/*
 * tran_reset_notify(9E) entry point: register/unregister a reset
 * notification callback via the common framework helper.
 */
static int
pmcs_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &pwp->lock, &pwp->reset_notify_listf));
}


/*
 * Common worker for tran_getcap(9E)/tran_setcap(9E).  'set' nonzero
 * means a setcap request (only TAGGED_QING is settable here).
 * Returns the capability value, or -1 for unknown capabilities or
 * unknown targets.
 */
static int
pmcs_cap(struct scsi_address *ap, char *cap, int val, int tonly, int set)
{
	_NOTE(ARGUNUSED(val, tonly));
	int cidx, rval = 0;
	pmcs_xscsi_t *xp;

	cidx = scsi_hba_lookup_capstr(cap);
	if (cidx == -1) {
		return (-1);
	}

	/* On success, pmcs_addr2xp returns with xp->statlock held */
	xp = pmcs_addr2xp(ap, NULL, NULL);
	if (xp == NULL) {
		return (-1);
	}

	switch (cidx) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_INITIATOR_ID:
		if (set == 0) {
			rval = INT_MAX;	/* argh */
		}
		break;
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
	case SCSI_CAP_UNTAGGED_QING:
		if (set == 0) {
			rval = 1;
		}
		break;

	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;

	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_QFULL_RETRIES:
	case SCSI_CAP_QFULL_RETRY_INTERVAL:
		break;
	case SCSI_CAP_SCSI_VERSION:
		if (set == 0) {
			rval = SCSI_VERSION_3;
		}
		break;
	case SCSI_CAP_INTERCONNECT_TYPE:
		if (set) {
			break;
		}
		if (xp->phy_addressable) {
			rval = INTERCONNECT_SATA;
		} else {
			rval = INTERCONNECT_SAS;
		}
		break;
	case SCSI_CAP_CDB_LEN:
		if (set == 0) {
			rval = 16;
		}
		break;
	case SCSI_CAP_LUN_RESET:
		if (set) {
			break;
		}
		/* LUN reset is not supported on SATA devices */
		if (xp->dtype == SATA) {
			rval = 0;
		} else {
			rval = 1;
		}
		break;
	default:
		rval = -1;
		break;
	}
	mutex_exit(&xp->statlock);
	pmcs_prt(ADDR2PMC(ap), PMCS_PRT_DEBUG3, NULL, NULL,
	    "%s: cap %s val %d set %d rval %d",
	    __func__, cap, val, set, rval);
	return (rval);
}

/*
 * Returns with statlock held if the xp is found.
 * Fills in pmcs_cmd_t with values if pmcs_cmd_t pointer non-NULL.
 */
static pmcs_xscsi_t *
pmcs_addr2xp(struct scsi_address *ap, uint64_t *lp, pmcs_cmd_t *sp)
{
	pmcs_xscsi_t *xp;
	pmcs_lun_t *lun = (pmcs_lun_t *)
	    scsi_device_hba_private_get(scsi_address_device(ap));

	if ((lun == NULL) || (lun->target == NULL)) {
		return (NULL);
	}
	xp = lun->target;
	mutex_enter(&xp->statlock);

	if (xp->dev_gone || (xp->phy == NULL)) {
		/*
		 * This may be a retried packet, so it's possible cmd_target
		 * and cmd_lun may still be populated.  Clear them.
		 */
		if (sp != NULL) {
			sp->cmd_target = NULL;
			sp->cmd_lun = NULL;
		}
		mutex_exit(&xp->statlock);
		return (NULL);
	}

	if (sp != NULL) {
		sp->cmd_target = xp;
		sp->cmd_lun = lun;
	}
	if (lp) {
		*lp = lun->lun_num;
	}
	return (xp);
}

/* tran_getcap(9E) entry point: thin wrapper around pmcs_cap */
static int
pmcs_scsa_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int r;
	if (cap == NULL) {
		return (-1);
	}
	r = pmcs_cap(ap, cap, 0, whom, 0);
	return (r);
}

/* tran_setcap(9E) entry point: thin wrapper around pmcs_cap */
static int
pmcs_scsa_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int r;
	if (cap == NULL) {
		return (-1);
	}
	r = pmcs_cap(ap, cap, value, whom, 1);
	return (r);
}

/* tran_setup_pkt(9E): initialize our per-packet pmcs_cmd_t */
static int
pmcs_scsa_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t),
    caddr_t cbarg)
{
	_NOTE(ARGUNUSED(callback, cbarg));
	pmcs_cmd_t *sp = pkt->pkt_ha_private;

	bzero(sp, sizeof (pmcs_cmd_t));
	sp->cmd_pkt = pkt;
	return (0);
}

/* tran_teardown_pkt(9E): drop target/LUN linkage from the command */
static void
pmcs_scsa_teardown_pkt(struct scsi_pkt *pkt)
{
	pmcs_cmd_t *sp = pkt->pkt_ha_private;
	sp->cmd_target = NULL;
	sp->cmd_lun = NULL;
}

/*
 * smp_tran_start entry point: synchronously issue an SMP request to an
 * expander and copy back the response.  The request and response are
 * staged through the HBA's scratch area (response at offset rdoff).
 * Returns DDI_SUCCESS/DDI_FAILURE, with smp_pkt_reason set to an errno
 * on failure.
 */
static int
pmcs_smp_start(struct smp_pkt *smp_pkt)
{
	struct pmcwork *pwrk;
	pmcs_iport_t *iport;
	const uint_t rdoff = SAS_SMP_MAX_PAYLOAD;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status;
	uint64_t wwn;
	pmcs_hw_t *pwp;
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint_t reqsz, rspsz, will_retry;
	int result;

	pwp = smp_pkt->smp_pkt_address->smp_a_hba_tran->smp_tran_hba_private;
	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);

	pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL,
	    "%s: starting for wwn 0x%" PRIx64, __func__, wwn);

	will_retry = smp_pkt->smp_pkt_will_retry;

	/* Blocking acquire; scratch must be released on every exit path */
	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	reqsz = smp_pkt->smp_pkt_reqsize;
	if (reqsz > SAS_SMP_MAX_PAYLOAD) {
1032 reqsz = SAS_SMP_MAX_PAYLOAD; 1033 } 1034 (void) memcpy(pwp->scratch, smp_pkt->smp_pkt_req, reqsz); 1035 1036 rspsz = smp_pkt->smp_pkt_rspsize; 1037 if (rspsz > SAS_SMP_MAX_PAYLOAD) { 1038 rspsz = SAS_SMP_MAX_PAYLOAD; 1039 } 1040 1041 /* 1042 * The request size from the SMP driver always includes 4 bytes 1043 * for the CRC. The PMCS chip, however, doesn't want to see those 1044 * counts as part of the transfer size. 1045 */ 1046 reqsz -= 4; 1047 1048 pptr = pmcs_find_phy_by_wwn(pwp, wwn); 1049 /* PHY is now locked */ 1050 if (pptr == NULL || pptr->dtype != EXPANDER) { 1051 if (pptr) { 1052 pmcs_unlock_phy(pptr); 1053 } 1054 pmcs_release_scratch(pwp); 1055 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 1056 "%s: could not find phy", __func__); 1057 smp_pkt->smp_pkt_reason = ENXIO; 1058 return (DDI_FAILURE); 1059 } 1060 1061 if ((pptr->iport == NULL) || !pptr->valid_device_id) { 1062 pmcs_unlock_phy(pptr); 1063 pmcs_release_scratch(pwp); 1064 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target, 1065 "%s: Can't reach PHY %s", __func__, pptr->path); 1066 smp_pkt->smp_pkt_reason = ENXIO; 1067 return (DDI_FAILURE); 1068 } 1069 1070 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr); 1071 if (pwrk == NULL) { 1072 pmcs_unlock_phy(pptr); 1073 pmcs_release_scratch(pwp); 1074 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, 1075 "%s: could not get work structure", __func__); 1076 smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EBUSY; 1077 return (DDI_FAILURE); 1078 } 1079 1080 pwrk->arg = msg; 1081 pwrk->dtype = EXPANDER; 1082 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]); 1083 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 1084 if (ptr == NULL) { 1085 pmcs_pwork(pwp, pwrk); 1086 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]); 1087 pmcs_unlock_phy(pptr); 1088 pmcs_release_scratch(pwp); 1089 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1090 "%s: could not get IQ entry", __func__); 1091 smp_pkt->smp_pkt_reason = will_retry ? 
EAGAIN :EBUSY; 1092 return (DDI_FAILURE); 1093 } 1094 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST)); 1095 msg[1] = LE_32(pwrk->htag); 1096 msg[2] = LE_32(pptr->device_id); 1097 msg[3] = LE_32(SMP_INDIRECT_RESPONSE | SMP_INDIRECT_REQUEST); 1098 msg[8] = LE_32(DWORD0(pwp->scratch_dma)); 1099 msg[9] = LE_32(DWORD1(pwp->scratch_dma)); 1100 msg[10] = LE_32(reqsz); 1101 msg[11] = 0; 1102 msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff)); 1103 msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff)); 1104 msg[14] = LE_32(rspsz); 1105 msg[15] = 0; 1106 1107 COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE); 1108 1109 pmcs_hold_iport(pptr->iport); 1110 iport = pptr->iport; 1111 pmcs_smp_acquire(iport); 1112 pwrk->state = PMCS_WORK_STATE_ONCHIP; 1113 htag = pwrk->htag; 1114 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER); 1115 pmcs_unlock_phy(pptr); 1116 WAIT_FOR(pwrk, smp_pkt->smp_pkt_timeout * 1000, result); 1117 pmcs_pwork(pwp, pwrk); 1118 pmcs_smp_release(iport); 1119 pmcs_rele_iport(iport); 1120 pmcs_lock_phy(pptr); 1121 1122 if (result) { 1123 pmcs_timed_out(pwp, htag, __func__); 1124 if (pmcs_abort(pwp, pptr, htag, 0, 0)) { 1125 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target, 1126 "%s: Unable to issue SMP ABORT for htag 0x%08x", 1127 __func__, htag); 1128 } else { 1129 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target, 1130 "%s: Issuing SMP ABORT for htag 0x%08x", 1131 __func__, htag); 1132 } 1133 pmcs_unlock_phy(pptr); 1134 pmcs_release_scratch(pwp); 1135 smp_pkt->smp_pkt_reason = ETIMEDOUT; 1136 return (DDI_FAILURE); 1137 } 1138 status = LE_32(msg[2]); 1139 if (status == PMCOUT_STATUS_OVERFLOW) { 1140 status = PMCOUT_STATUS_OK; 1141 smp_pkt->smp_pkt_reason = EOVERFLOW; 1142 } 1143 if (status != PMCOUT_STATUS_OK) { 1144 const char *emsg = pmcs_status_str(status); 1145 if (emsg == NULL) { 1146 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target, 1147 "SMP operation failed (0x%x)", status); 1148 } else { 1149 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target, 
1150 "SMP operation failed (%s)", emsg); 1151 } 1152 1153 if ((status == PMCOUT_STATUS_ERROR_HW_TIMEOUT) || 1154 (status == PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT)) { 1155 smp_pkt->smp_pkt_reason = 1156 will_retry ? EAGAIN : ETIMEDOUT; 1157 result = DDI_FAILURE; 1158 } else if (status == 1159 PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS) { 1160 xp = pptr->target; 1161 if (xp == NULL) { 1162 smp_pkt->smp_pkt_reason = EIO; 1163 result = DDI_FAILURE; 1164 goto out; 1165 } 1166 if (xp->dev_state != 1167 PMCS_DEVICE_STATE_NON_OPERATIONAL) { 1168 xp->dev_state = 1169 PMCS_DEVICE_STATE_NON_OPERATIONAL; 1170 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, xp->phy, 1171 xp, "%s: Got _IT_NEXUS_LOSS SMP status. " 1172 "Tgt(0x%p) dev_state set to " 1173 "_NON_OPERATIONAL", __func__, 1174 (void *)xp); 1175 } 1176 /* ABORT any pending commands related to this device */ 1177 if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) != 0) { 1178 pptr->abort_pending = 1; 1179 smp_pkt->smp_pkt_reason = EIO; 1180 result = DDI_FAILURE; 1181 } 1182 } else { 1183 smp_pkt->smp_pkt_reason = will_retry ? 
			    EAGAIN : EIO;
			result = DDI_FAILURE;
		}
	} else {
		/* Success: copy the SMP response back to the caller */
		(void) memcpy(smp_pkt->smp_pkt_rsp,
		    &((uint8_t *)pwp->scratch)[rdoff], rspsz);
		if (smp_pkt->smp_pkt_reason == EOVERFLOW) {
			result = DDI_FAILURE;
		} else {
			result = DDI_SUCCESS;
		}
	}
out:
	pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, pptr->target,
	    "%s: done for wwn 0x%" PRIx64, __func__, wwn);

	pmcs_unlock_phy(pptr);
	pmcs_release_scratch(pwp);
	return (result);
}

/*
 * SMP tgt_init(9E)-style entry point for expander (SMP) children.
 *
 * Looks up the iport and target softstate for the child node being
 * attached, assigns a target number/slot on the first init, takes a
 * reference on the backing PHY (unless it is a root PHY directly on
 * the HBA), and publishes the "attached-port" property on the SMP
 * device node.
 *
 * Returns DDI_SUCCESS when setup completed -- or when the
 * "target-port" property could not be read, which is deliberately
 * not treated as fatal -- and DDI_FAILURE otherwise.
 */
static int
pmcs_smp_init(dev_info_t *self, dev_info_t *child,
    smp_hba_tran_t *tran, smp_device_t *smp_sd)
{
	_NOTE(ARGUNUSED(tran, smp_sd));
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	pmcs_xscsi_t *tgt;
	pmcs_phy_t *phy, *pphy;
	uint64_t wwn;
	char *addr, *tgt_port;
	int ua_form = 1;	/* unit-address form for scsi_wwn_to_wwnstr */

	iport = ddi_get_soft_state(pmcs_iport_softstate,
	    ddi_get_instance(self));
	ASSERT(iport);
	if (iport == NULL)
		return (DDI_FAILURE);
	pwp = iport->pwp;
	ASSERT(pwp);
	if (pwp == NULL)
		return (DDI_FAILURE);

	/* Get "target-port" prop from devinfo node */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
		    "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
		/* Don't fail _smp_init() because we couldn't get/set a prop */
		return (DDI_SUCCESS);
	}

	/*
	 * Validate that this tran_tgt_init is for an active iport.
	 */
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Init on inactive iport for '%s'", __func__, tgt_port);
		ddi_prop_free(tgt_port);
		return (DDI_FAILURE);
	}

	mutex_enter(&pwp->lock);

	/* Retrieve softstate using unit-address */
	tgt = pmcs_get_target(iport, tgt_port, B_TRUE);
	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: tgt softstate not found", __func__);
		ddi_prop_free(tgt_port);
		mutex_exit(&pwp->lock);
		return (DDI_FAILURE);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)",
	    __func__, ddi_get_name(child), tgt_port);

	mutex_enter(&tgt->statlock);
	phy = tgt->phy;
	/* NOTE(review): pmcs_get_target presumably returned with the PHY locked */
	ASSERT(mutex_owned(&phy->phy_lock));

	if (IS_ROOT_PHY(phy)) {
		/* Expander attached to HBA - don't ref_count it */
		wwn = pwp->sas_wwns[0];
	} else {
		pmcs_inc_phy_ref_count(phy);

		/*
		 * Parent (in topology) is also an expander
		 * Now that we've increased the ref count on phy, it's OK
		 * to drop the lock so we can acquire the parent's lock.
		 */
		pphy = phy->parent;
		mutex_exit(&tgt->statlock);
		pmcs_unlock_phy(phy);
		pmcs_lock_phy(pphy);
		wwn = pmcs_barray2wwn(pphy->sas_address);
		pmcs_unlock_phy(pphy);
		pmcs_lock_phy(phy);
		mutex_enter(&tgt->statlock);
	}

	/*
	 * If this is the 1st smp_init, add this to our list.
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		/* Claim the first free slot in the target table */
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			tgt->assigned = 1;
			tgt->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "Target list full.");
			goto smp_init_fail;
		}
	}

	if (!pmcs_assign_device(pwp, tgt)) {
		pwp->targets[tgt->target_num] = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto smp_init_fail;
	}

	/*
	 * Update the attached port and target port pm properties
	 */
	tgt->smpd = smp_sd;

	pmcs_unlock_phy(phy);
	mutex_exit(&pwp->lock);

	tgt->ref_count++;
	tgt->dtype = phy->dtype;
	mutex_exit(&tgt->statlock);

	pmcs_update_phy_pm_props(phy, 0, 0, B_TRUE);

	addr = scsi_wwn_to_wwnstr(wwn, ua_form, NULL);
	if (smp_device_prop_update_string(smp_sd, SCSI_ADDR_PROP_ATTACHED_PORT,
	    addr) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to set "
		    "prop ("SCSI_ADDR_PROP_ATTACHED_PORT")", __func__);
	}
	(void) scsi_free_wwnstr(addr);
	ddi_prop_free(tgt_port);
	return (DDI_SUCCESS);

smp_init_fail:
	/* Undo partial setup: release slot, PHY reference and softstate */
	tgt->phy = NULL;
	tgt->target_num = PMCS_INVALID_TARGET_NUM;
	phy->target = NULL;
	if (!IS_ROOT_PHY(phy)) {
		pmcs_dec_phy_ref_count(phy);
	}
	mutex_exit(&tgt->statlock);
	pmcs_unlock_phy(phy);
	mutex_exit(&pwp->lock);
	ddi_soft_state_bystr_free(iport->tgt_sstate, tgt->unit_address);
	ddi_prop_free(tgt_port);
	return (DDI_FAILURE);
}

/*
 * SMP tgt_free(9E)-style entry point: drop the reference taken in
 * pmcs_smp_init() and tear the target down on last release.
 */
static void
pmcs_smp_free(dev_info_t *self, dev_info_t *child,
    smp_hba_tran_t *tran, smp_device_t *smp)
{
	_NOTE(ARGUNUSED(tran, smp));
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	pmcs_xscsi_t *tgt;
	pmcs_phy_t *phyp;
	char *tgt_port;

	iport = ddi_get_soft_state(pmcs_iport_softstate,
	    ddi_get_instance(self));
	ASSERT(iport);
	if (iport == NULL)
		return;

	pwp = iport->pwp;
	if (pwp == NULL)
		return;
	ASSERT(pwp);

	/* Get "target-port" prop from devinfo node */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
		    "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
		return;
	}

	/* Retrieve softstate using unit-address */
	mutex_enter(&pwp->lock);
	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)", __func__,
	    ddi_get_name(child), tgt_port);
	ddi_prop_free(tgt_port);

	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: tgt softstate not found", __func__);
		mutex_exit(&pwp->lock);
		return;
	}

	phyp = tgt->phy;
	if (phyp) {
		mutex_enter(&phyp->phy_lock);
		if (!IS_ROOT_PHY(phyp)) {
			/* Release the hold taken in pmcs_smp_init() */
			pmcs_dec_phy_ref_count(phyp);
		}
	}
	mutex_enter(&tgt->statlock);

	if (--tgt->ref_count == 0) {
		/*
		 * Remove this target from our list. The softstate
		 * will remain, and the device will remain registered
		 * with the hardware unless/until we're told that the
		 * device physically went away.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt,
		    "Removing target 0x%p (vtgt %d) from target list",
		    (void *)tgt, tgt->target_num);
		pwp->targets[tgt->target_num] = NULL;
		tgt->target_num = PMCS_INVALID_TARGET_NUM;
		if (phyp) {
			phyp->target = NULL;
		}
		tgt->phy = NULL;
		/*
		 * NOTE(review): no statlock exit on this path -- presumably
		 * pmcs_destroy_target() consumes/releases it; confirm.
		 */
		pmcs_destroy_target(tgt);
	} else {
		mutex_exit(&tgt->statlock);
	}

	if (phyp) {
		mutex_exit(&phyp->phy_lock);
	}
	mutex_exit(&pwp->lock);
}

/*
 * scsi_hba quiesce(9E) entry point: block new I/O submission and wait
 * until every target's active command count drains to zero.  Returns 0
 * on success (or when called on an iport node), -1 when the HBA
 * softstate is missing or the chip is not in STATE_RUNNING.
 */
static int
pmcs_scsi_quiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;
	int totactive = -1;
	pmcs_xscsi_t *xp;
	uint16_t target;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
	pwp->quiesced = pwp->blocked = 1;
	while (totactive) {
		totactive = 0;
		/* Sum outstanding commands and flag draining targets */
		for (target = 0; target < pwp->max_dev; target++) {
			xp = pwp->targets[target];
			if (xp == NULL) {
				continue;
			}
			mutex_enter(&xp->statlock);
			if (xp->actv_cnt) {
				totactive += xp->actv_cnt;
				xp->draining = 1;
			}
			mutex_exit(&xp->statlock);
		}
		if (totactive) {
			/* Completion path signals drain_cv at actv_cnt == 0 */
			cv_wait(&pwp->drain_cv, &pwp->lock);
		}
		/*
		 * The pwp->blocked may have been reset.
		 * e.g. a SCSI bus reset
		 */
		pwp->blocked = 1;
	}

	/* Drain complete: clear the per-target draining flags */
	for (target = 0; target < pwp->max_dev; target++) {
		xp = pwp->targets[target];
		if (xp == NULL) {
			continue;
		}
		mutex_enter(&xp->statlock);
		xp->draining = 0;
		mutex_exit(&xp->statlock);
	}

	mutex_exit(&pwp->lock);
	if (totactive == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s drain complete", __func__);
	}
	return (0);
}

/*
 * scsi_hba unquiesce(9E) entry point: re-enable I/O after a quiesce
 * and restart anything that queued up while blocked.  Returns 0 on
 * success (or for an iport node), -1 when the HBA softstate is missing
 * or the chip is not in STATE_RUNNING.
 */
static int
pmcs_scsi_unquiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
	pwp->blocked = pwp->quiesced = 0;
	mutex_exit(&pwp->lock);

	/*
	 * Run all pending commands.
	 */
	pmcs_scsa_wq_run(pwp);

	/*
	 * Complete all completed commands.
	 * This also unlocks us.
	 */
	PMCS_CQ_RUN(pwp);
	return (0);
}

/*
 * Start commands for a particular device
 * If the actual start of a command fails, return B_FALSE. Any other result
 * is a B_TRUE return.
 */
boolean_t
pmcs_scsa_wq_run_one(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
	pmcs_cmd_t *sp;
	pmcs_phy_t *phyp;
	pmcwork_t *pwrk;
	boolean_t run_one, blocked;
	int rval;

	/*
	 * First, check to see if we're blocked or resource limited
	 */
	mutex_enter(&pwp->lock);
	blocked = pwp->blocked;
	/*
	 * If resource_limited is set, we're resource constrained and
	 * we will run only one work request for this target.
	 */
	run_one = pwp->resource_limited;
	mutex_exit(&pwp->lock);

	if (blocked) {
		/* Queues will get restarted when we get unblocked */
		return (B_TRUE);
	}

	/*
	 * Might as well verify the queue is not empty before moving on
	 */
	mutex_enter(&xp->wqlock);
	if (STAILQ_EMPTY(&xp->wq)) {
		mutex_exit(&xp->wqlock);
		return (B_TRUE);
	}
	mutex_exit(&xp->wqlock);

	/*
	 * If we're draining or resetting, just reschedule work queue and bail.
	 */
	mutex_enter(&xp->statlock);
	if (xp->draining || xp->resetting || xp->special_running ||
	    xp->special_needed) {
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (B_TRUE);
	}

	/*
	 * Next, check to see if the target is gone.
	 */
	if (xp->dev_gone) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Flushing wait queue for dead tgt 0x%p", __func__,
		    (void *)xp);
		pmcs_flush_target_queues(pwp, xp, PMCS_TGT_WAIT_QUEUE);
		mutex_exit(&xp->statlock);
		return (B_TRUE);
	}

	/*
	 * Increment the PHY's ref_count now so we know it won't go away
	 * after we drop the target lock. Drop it before returning. If the
	 * PHY dies, the commands we attempt to send will fail, but at least
	 * we know we have a real PHY pointer.
	 */
	phyp = xp->phy;
	pmcs_inc_phy_ref_count(phyp);
	mutex_exit(&xp->statlock);

	mutex_enter(&xp->wqlock);
	while ((sp = STAILQ_FIRST(&xp->wq)) != NULL) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_CBACK, phyp);
		if (pwrk == NULL) {
			/* Out of work structures: flag it and retry later */
			mutex_exit(&xp->wqlock);
			mutex_enter(&pwp->lock);
			if (pwp->resource_limited == 0) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
				    "%s: out of work structures", __func__);
			}
			pwp->resource_limited = 1;
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
			mutex_exit(&pwp->lock);
			return (B_FALSE);
		}
		STAILQ_REMOVE_HEAD(&xp->wq, cmd_next);
		mutex_exit(&xp->wqlock);

		pwrk->xp = xp;
		pwrk->arg = sp;
		sp->cmd_tag = pwrk->htag;
		/* Default to a one second timeout if pkt_time is zero */
		pwrk->timer = US2WT(CMD2PKT(sp)->pkt_time * 1000000);
		if (pwrk->timer == 0) {
			pwrk->timer = US2WT(1000000);
		}

		pwrk->dtype = xp->dtype;

		if (xp->dtype == SAS) {
			pwrk->ptr = (void *) pmcs_SAS_done;
			if ((rval = pmcs_SAS_run(sp, pwrk)) != 0) {
				sp->cmd_tag = NULL;
				pmcs_dec_phy_ref_count(phyp);
				pmcs_pwork(pwp, pwrk);
				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
				if (rval == PMCS_WQ_RUN_FAIL_RES) {
					return (B_FALSE);
				} else {
					return (B_TRUE);
				}
			}
		} else {
			ASSERT(xp->dtype == SATA);
			pwrk->ptr = (void *) pmcs_SATA_done;
			if ((rval = pmcs_SATA_run(sp, pwrk)) != 0) {
				sp->cmd_tag = NULL;
				pmcs_dec_phy_ref_count(phyp);
				pmcs_pwork(pwp, pwrk);
				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
				if (rval == PMCS_WQ_RUN_FAIL_RES) {
					return (B_FALSE);
				} else {
					return (B_TRUE);
				}
			}
		}

		/* Resource-constrained: service only one command per target */
		if (run_one) {
			goto wq_out;
		}
		mutex_enter(&xp->wqlock);
	}

	mutex_exit(&xp->wqlock);

wq_out:
	pmcs_dec_phy_ref_count(phyp);
	return (B_TRUE);
}

/*
 * Start commands for all devices.
 */
void
pmcs_scsa_wq_run(pmcs_hw_t *pwp)
{
	pmcs_xscsi_t *xp;
	uint16_t target_start, target;
	boolean_t rval = B_TRUE;

	mutex_enter(&pwp->lock);
	/* Round-robin: resume from the device after the last one serviced */
	target_start = pwp->last_wq_dev;
	target = target_start;

	do {
		xp = pwp->targets[target];
		if ((xp == NULL) || (STAILQ_EMPTY(&xp->wq))) {
			if (++target == pwp->max_dev) {
				target = 0;
			}
			continue;
		}

		mutex_exit(&pwp->lock);
		rval = pmcs_scsa_wq_run_one(pwp, xp);
		mutex_enter(&pwp->lock);

		if (rval == B_FALSE) {
			break;
		}

		if (++target == pwp->max_dev) {
			target = 0;
		}
	} while (target != target_start);

	if (rval) {
		/*
		 * If we were resource limited, but apparently are not now,
		 * reschedule the work queues anyway.
		 */
		if (pwp->resource_limited) {
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		}
		pwp->resource_limited = 0; /* Not resource-constrained */
	} else {
		/*
		 * Give everybody a chance, and reschedule to run the queues
		 * again as long as we're limited.
		 */
		pwp->resource_limited = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
	}

	pwp->last_wq_dev = target;
	mutex_exit(&pwp->lock);
}

/*
 * Pull the completion queue, drop the lock and complete all elements.
 */

void
pmcs_scsa_cq_run(void *arg)
{
	pmcs_cq_thr_info_t *cqti = (pmcs_cq_thr_info_t *)arg;
	pmcs_hw_t *pwp = cqti->cq_pwp;
	pmcs_cmd_t *sp, *nxt;
	struct scsi_pkt *pkt;
	pmcs_xscsi_t *tgt;
	pmcs_iocomp_cb_t *ioccb, *ioccb_next;
	pmcs_cb_t callback;

	DTRACE_PROBE1(pmcs__scsa__cq__run__start, pmcs_cq_thr_info_t *, cqti);

	mutex_enter(&pwp->cq_lock);

	while (!pwp->cq_info.cq_stop) {
		/*
		 * First, check the I/O completion callback queue.
		 */
		ioccb = pwp->iocomp_cb_head;
		pwp->iocomp_cb_head = NULL;
		pwp->iocomp_cb_tail = NULL;
		mutex_exit(&pwp->cq_lock);

		while (ioccb) {
			/*
			 * Grab the lock on the work structure. The callback
			 * routine is responsible for clearing it.
			 */
			mutex_enter(&ioccb->pwrk->lock);
			ioccb_next = ioccb->next;
			callback = (pmcs_cb_t)ioccb->pwrk->ptr;
			(*callback)(pwp, ioccb->pwrk,
			    (uint32_t *)((void *)ioccb->iomb));
			kmem_cache_free(pwp->iocomp_cb_cache, ioccb);
			ioccb = ioccb_next;
		}

		/*
		 * Next, run the completion queue
		 */
		mutex_enter(&pwp->cq_lock);
		sp = STAILQ_FIRST(&pwp->cq);
		STAILQ_INIT(&pwp->cq);
		mutex_exit(&pwp->cq_lock);

		DTRACE_PROBE1(pmcs__scsa__cq__run__start__loop,
		    pmcs_cq_thr_info_t *, cqti);

		if (sp && pmcs_check_acc_dma_handle(pwp)) {
			ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
		}

		while (sp) {
			nxt = STAILQ_NEXT(sp, cmd_next);
			pkt = CMD2PKT(sp);
			tgt = sp->cmd_target;
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, tgt,
			    "%s: calling completion on %p for tgt %p", __func__,
			    (void *)sp, (void *)tgt);
			if (tgt) {
				mutex_enter(&tgt->statlock);
				ASSERT(tgt->actv_pkts != 0);
				tgt->actv_pkts--;
				mutex_exit(&tgt->statlock);
			}
			/* Hand the packet back to the SCSA framework */
			scsi_hba_pkt_comp(pkt);
			sp = nxt;
		}

		DTRACE_PROBE1(pmcs__scsa__cq__run__end__loop,
		    pmcs_cq_thr_info_t *, cqti);

		/*
		 * Check if there are more completions to do. If so, and we've
		 * not been told to stop, skip the wait and cycle through again.
		 */
		mutex_enter(&pwp->cq_lock);
		if ((pwp->iocomp_cb_head == NULL) && STAILQ_EMPTY(&pwp->cq) &&
		    !pwp->cq_info.cq_stop) {
			/* Nothing pending: sleep until a completion is posted */
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&cqti->cq_thr_lock);
			cv_wait(&cqti->cq_cv, &cqti->cq_thr_lock);
			mutex_exit(&cqti->cq_thr_lock);
			mutex_enter(&pwp->cq_lock);
		}
	}

	mutex_exit(&pwp->cq_lock);
	DTRACE_PROBE1(pmcs__scsa__cq__run__stop, pmcs_cq_thr_info_t *, cqti);
	thread_exit();
}

/*
 * Run a SAS command. Called with pwrk->lock held, returns unlocked.
 */
static int
pmcs_SAS_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
	pmcs_hw_t *pwp = CMD2PMC(sp);
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_xscsi_t *xp = pwrk->xp;
	uint32_t iq, *ptr;
	sas_ssp_cmd_iu_t sc;

	ASSERT(xp != NULL);
	mutex_enter(&xp->statlock);
	if (!xp->assigned) {
		mutex_exit(&xp->statlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	if ((xp->actv_cnt >= xp->qdepth) || xp->recover_wait) {
		/* Queue depth reached or recovering: push back on wait queue */
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	GET_IO_IQ_ENTRY(pwp, ptr, pwrk->phy->device_id, iq);
	if (ptr == NULL) {
		mutex_exit(&xp->statlock);
		/*
		 * This is a temporary failure not likely to unblocked by
		 * commands completing as the test for scheduling the
		 * restart of work is a per-device test.
		 */
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Failed to get IO IQ entry for tgt %d",
		    __func__, xp->target_num);
		return (PMCS_WQ_RUN_FAIL_RES);

	}

	/* Build the SSP_INI_IO_START IOMB */
	ptr[0] =
	    LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SSP_INI_IO_START));
	ptr[1] = LE_32(pwrk->htag);
	ptr[2] = LE_32(pwrk->phy->device_id);
	ptr[3] = LE_32(pkt->pkt_dma_len);
	if (ptr[3]) {
		ASSERT(pkt->pkt_numcookies);
		/* Data direction follows the DMA flags */
		if (pkt->pkt_dma_flags & DDI_DMA_READ) {
			ptr[4] = LE_32(PMCIN_DATADIR_2_INI);
		} else {
			ptr[4] = LE_32(PMCIN_DATADIR_2_DEV);
		}
		if (pmcs_dma_load(pwp, sp, ptr)) {
			mutex_exit(&pwp->iqp_lock[iq]);
			mutex_exit(&xp->statlock);
			mutex_enter(&xp->wqlock);
			if (STAILQ_EMPTY(&xp->wq)) {
				STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
				mutex_exit(&xp->wqlock);
			} else {
				/* Complete the command back with QFULL status */
				mutex_exit(&xp->wqlock);
				CMD2PKT(sp)->pkt_scbp[0] = STATUS_QFULL;
				CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
				CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS |
				    STATE_GOT_TARGET | STATE_SENT_CMD |
				    STATE_GOT_STATUS;
				mutex_enter(&pwp->cq_lock);
				STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
				PMCS_CQ_RUN_LOCKED(pwp);
				mutex_exit(&pwp->cq_lock);
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
				    "%s: Failed to dma_load for tgt %d (QF)",
				    __func__, xp->target_num);
			}
			return (PMCS_WQ_RUN_FAIL_RES);
		}
	} else {
		ptr[4] = LE_32(PMCIN_DATADIR_NONE);
		CLEAN_MESSAGE(ptr, 12);
	}
	xp->actv_cnt++;
	if (xp->actv_cnt > xp->maxdepth) {
		xp->maxdepth = xp->actv_cnt;
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp, "%s: max depth "
		    "now %u", pwrk->phy->path, xp->maxdepth);
	}
	mutex_exit(&xp->statlock);


#ifdef	DEBUG
	/*
	 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
	 * event when this goes out on the wire.
	 */
	ptr[4] |= PMCIN_MESSAGE_REPORT;
#endif
	/*
	 * Fill in the SSP IU
	 */

	bzero(&sc, sizeof (sas_ssp_cmd_iu_t));
	bcopy((uint8_t *)&sp->cmd_lun->scsi_lun, sc.lun, sizeof (scsi_lun_t));

	/* Map SCSA tag flags onto the SAS task attribute */
	switch (pkt->pkt_flags & FLAG_TAGMASK) {
	case FLAG_HTAG:
		sc.task_attribute = SAS_CMD_TASK_ATTR_HEAD;
		break;
	case FLAG_OTAG:
		sc.task_attribute = SAS_CMD_TASK_ATTR_ORDERED;
		break;
	case FLAG_STAG:
	default:
		sc.task_attribute = SAS_CMD_TASK_ATTR_SIMPLE;
		break;
	}
	(void) memcpy(sc.cdb, pkt->pkt_cdbp,
	    min(SCSA_CDBLEN(sp), sizeof (sc.cdb)));
	(void) memcpy(&ptr[5], &sc, sizeof (sas_ssp_cmd_iu_t));
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	mutex_exit(&pwrk->lock);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: giving pkt %p (tag %x) to the hardware", __func__,
	    (void *)pkt, pwrk->htag);
#ifdef DEBUG
	pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SAS INI Message", ptr);
#endif
	/* Move the command to the active queue and ring the doorbell */
	mutex_enter(&xp->aqlock);
	STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
	mutex_exit(&xp->aqlock);
	INC_IQ_ENTRY(pwp, iq);

	/*
	 * If we just submitted the last command queued from device state
	 * recovery, clear the wq_recovery_tail pointer.
	 */
	mutex_enter(&xp->wqlock);
	if (xp->wq_recovery_tail == sp) {
		xp->wq_recovery_tail = NULL;
	}
	mutex_exit(&xp->wqlock);

	return (PMCS_WQ_RUN_SUCCESS);
}

/*
 * Complete a SAS command
 *
 * Called with pwrk lock held.
 * The free of pwrk releases the lock.
 */

static void
pmcs_SAS_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
{
	pmcs_cmd_t *sp = pwrk->arg;
	pmcs_phy_t *pptr = pwrk->phy;
	pmcs_xscsi_t *xp = pwrk->xp;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	int dead;
	uint32_t sts;
	boolean_t aborted = B_FALSE;
	boolean_t do_ds_recovery = B_FALSE;

	ASSERT(xp != NULL);
	ASSERT(sp != NULL);
	ASSERT(pptr != NULL);

	DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
	    (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
	    hrtime_t, gethrtime());

	dead = pwrk->dead;

	/* msg may be NULL (e.g. no outbound IOMB); treat status as 0 then */
	if (msg) {
		sts = LE_32(msg[2]);
	} else {
		sts = 0;
	}

	if (dead != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag "
		    "0x%x for %s", __func__, pwrk->htag, pptr->path);
		goto out;
	}

	if (sts == PMCOUT_STATUS_ABORTED) {
		aborted = B_TRUE;
	}

	if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: cmd 0x%p (tag 0x%x) timed out for %s",
		    __func__, (void *)sp, pwrk->htag, pptr->path);
		CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
		CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
		goto out;
	}

	/*
	 * If the status isn't okay but not underflow,
	 * step to the side and parse the (possible) error.
	 */
#ifdef	DEBUG
	if (msg) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
	}
#endif
	if (!msg) {
		goto out;
	}

	switch (sts) {
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
	case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: PHY %s requires DS recovery (status=%d)",
		    __func__, pptr->path, sts);
		do_ds_recovery = B_TRUE;
		break;
	case PMCOUT_STATUS_UNDERFLOW:
		/* Underflow is benign: record the residual and treat as OK */
		(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, LE_32(msg[3]));
		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, NULL, NULL,
		    "%s: underflow %u for cdb 0x%x",
		    __func__, LE_32(msg[3]), pkt->pkt_cdbp[0] & 0xff);
		sts = PMCOUT_STATUS_OK;
		msg[3] = 0;
		break;
	case PMCOUT_STATUS_OK:
		pkt->pkt_resid = 0;
		break;
	}

	if (sts != PMCOUT_STATUS_OK) {
		pmcs_ioerror(pwp, SAS, pwrk, msg, sts);
	} else {
		if (msg[3]) {
			/* A response frame was returned; decode it */
			uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
			sas_ssp_rsp_iu_t *rptr = (void *)local;
			const int lim =
			    (PMCS_QENTRY_SIZE << 1) - SAS_RSP_HDR_SIZE;
			static const uint8_t ssp_rsp_evec[] = {
				0x58, 0x61, 0x56, 0x72, 0x00
			};

			/*
			 * Transform the first part of the response
			 * to host canonical form. This gives us enough
			 * information to figure out what to do with the
			 * rest (which remains unchanged in the incoming
			 * message which can be up to two queue entries
			 * in length).
			 */
			pmcs_endian_transform(pwp, local, &msg[5],
			    ssp_rsp_evec);
			xd = (uint8_t *)(&msg[5]);
			xd += SAS_RSP_HDR_SIZE;

			if (rptr->datapres == SAS_RSP_DATAPRES_RESPONSE_DATA) {
				if (rptr->response_data_length != 4) {
					pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
					    "Bad SAS RESPONSE DATA LENGTH",
					    msg);
					pkt->pkt_reason = CMD_TRAN_ERR;
					goto out;
				}
				(void) memcpy(&sts, xd, sizeof (uint32_t));
				sts = BE_32(sts);
				/*
				 * The only response code we should legally get
				 * here is an INVALID FRAME response code.
				 */
				if (sts == SAS_RSP_INVALID_FRAME) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: pkt %p tgt %u path %s "
					    "completed: INVALID FRAME response",
					    __func__, (void *)pkt,
					    xp->target_num, pptr->path);
				} else {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: pkt %p tgt %u path %s "
					    "completed: illegal response 0x%x",
					    __func__, (void *)pkt,
					    xp->target_num, pptr->path, sts);
				}
				pkt->pkt_reason = CMD_TRAN_ERR;
				goto out;
			}
			if (rptr->datapres == SAS_RSP_DATAPRES_SENSE_DATA) {
				/* Clamp sense length to what we buffered */
				uint32_t slen;
				slen = rptr->sense_data_length;
				if (slen > lim) {
					slen = lim;
				}
				pmcs_latch_status(pwp, sp, rptr->status, xd,
				    slen, pptr->path);
			} else if (rptr->datapres == SAS_RSP_DATAPRES_NO_DATA) {
				pmcout_ssp_comp_t *sspcp;
				sspcp = (pmcout_ssp_comp_t *)msg;
				uint32_t *residp;
				/*
				 * This is the case for a plain SCSI status.
				 * Note: If RESC_V is set and we're here, there
				 * is a residual. We need to find it and update
				 * the packet accordingly.
				 */
				pmcs_latch_status(pwp, sp, rptr->status, NULL,
				    0, pptr->path);

				if (sspcp->resc_v) {
					/*
					 * Point residual to the SSP_RESP_IU
					 */
					residp = (uint32_t *)(sspcp + 1);
					/*
					 * param contains the number of bytes
					 * between where the SSP_RESP_IU may
					 * or may not be and the residual.
					 * Increment residp by the appropriate
					 * number of words: (param+resc_pad)/4).
					 */
					residp += (LE_32(sspcp->param) +
					    sspcp->resc_pad) /
					    sizeof (uint32_t);
					pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW,
					    pptr, xp, "%s: tgt 0x%p "
					    "residual %d for pkt 0x%p",
					    __func__, (void *) xp, *residp,
					    (void *) pkt);
					ASSERT(LE_32(*residp) <=
					    pkt->pkt_dma_len);
					(void) pmcs_set_resid(pkt,
					    pkt->pkt_dma_len, LE_32(*residp));
				}
			} else {
				pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
				    "illegal SAS response", msg);
				pkt->pkt_reason = CMD_TRAN_ERR;
				goto out;
			}
		} else {
			/* No response frame: plain GOOD status */
			pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
			    pptr->path);
		}
		if (pkt->pkt_dma_len) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
	    "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
	    __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
	    pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);

	if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
		    __func__, (void *)pkt, pptr->path, (void *)pwrk);
		aborted = B_TRUE;
	}

out:
	pmcs_dma_unload(pwp, sp);
	mutex_enter(&xp->statlock);

	/*
	 * If the device no longer has a PHY pointer, clear the PHY pointer
	 * from the work structure before we free it. Otherwise, pmcs_pwork
	 * may decrement the ref_count on a PHY that's been freed.
	 */
	if (xp->phy == NULL) {
		pwrk->phy = NULL;
	}

	/*
	 * We may arrive here due to a command timing out, which in turn
	 * could be addressed in a different context. So, free the work
	 * back, but only after confirming it's not already been freed
	 * elsewhere.
	 */
	if (pwrk->htag != PMCS_TAG_FREE) {
		pmcs_pwork(pwp, pwrk);
	}

	/*
	 * If the device is gone, we only put this command on the completion
	 * queue if the work structure is not marked dead. If it's marked
	 * dead, it will already have been put there.
	 */
	if (xp->dev_gone) {
		mutex_exit(&xp->statlock);
		if (!dead) {
			mutex_enter(&xp->aqlock);
			STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
			mutex_exit(&xp->aqlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq",
			    __func__, (void *)sp, sp->cmd_tag);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
			    "%s: Completing command for dead target 0x%p",
			    __func__, (void *)xp);
		}
		return;
	}

	ASSERT(xp->actv_cnt > 0);
	if (--(xp->actv_cnt) == 0) {
		/* Last active command: wake anyone waiting in quiesce */
		if (xp->draining) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp,
			    "%s: waking up drain waiters", __func__);
			cv_signal(&pwp->drain_cv);
		}
	}
	mutex_exit(&xp->statlock);

	/*
	 * If the status is other than OK, determine if it's something that
	 * is worth re-attempting enumeration. If so, mark the PHY.
	 */
	if (sts != PMCOUT_STATUS_OK) {
		pmcs_status_disposition(pptr, sts);
	}

	if (dead == 0) {
#ifdef	DEBUG
		/* Paranoia: verify the command really is on the active queue */
		pmcs_cmd_t *wp;
		mutex_enter(&xp->aqlock);
		STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
			if (wp == sp) {
				break;
			}
		}
		ASSERT(wp != NULL);
#else
		mutex_enter(&xp->aqlock);
#endif
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
		    "%s: Removing cmd 0x%p (htag 0x%x) from aq", __func__,
		    (void *)sp, sp->cmd_tag);
		STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
		if (aborted) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
			    "%s: Aborted cmd for tgt 0x%p, signaling waiters",
			    __func__, (void *)xp);
			cv_signal(&xp->abort_cv);
		}
		mutex_exit(&xp->aqlock);
	}

	/*
	 * If do_ds_recovery is set, we need to initiate device state
	 * recovery. In this case, we put this I/O back on the head of
	 * the wait queue to run again after recovery is complete
	 */
	if (do_ds_recovery) {
		mutex_enter(&xp->statlock);
		pmcs_start_dev_state_recovery(xp, pptr);
		mutex_exit(&xp->statlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp, "%s: Putting cmd 0x%p "
		    "back on wq during recovery for tgt 0x%p", __func__,
		    (void *)sp, (void *)xp);
		mutex_enter(&xp->wqlock);
		if (xp->wq_recovery_tail == NULL) {
			STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		} else {
			/*
			 * If there are other I/Os waiting at the head due to
			 * device state recovery, add this one in the right spot
			 * to maintain proper order.
2313 */ 2314 STAILQ_INSERT_AFTER(&xp->wq, xp->wq_recovery_tail, sp, 2315 cmd_next); 2316 } 2317 xp->wq_recovery_tail = sp; 2318 mutex_exit(&xp->wqlock); 2319 } else { 2320 /* 2321 * If we're not initiating device state recovery and this 2322 * command was not "dead", put it on the completion queue 2323 */ 2324 if (!dead) { 2325 mutex_enter(&pwp->cq_lock); 2326 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2327 PMCS_CQ_RUN_LOCKED(pwp); 2328 mutex_exit(&pwp->cq_lock); 2329 } 2330 } 2331 } 2332 2333 /* 2334 * Run a SATA command (normal reads and writes), 2335 * or block and schedule a SATL interpretation 2336 * of the command. 2337 * 2338 * Called with pwrk lock held, returns unlocked. 2339 */ 2340 2341 static int 2342 pmcs_SATA_run(pmcs_cmd_t *sp, pmcwork_t *pwrk) 2343 { 2344 pmcs_hw_t *pwp = CMD2PMC(sp); 2345 struct scsi_pkt *pkt = CMD2PKT(sp); 2346 pmcs_xscsi_t *xp; 2347 uint8_t cdb_base, asc, tag; 2348 uint32_t *ptr, iq, nblk, i, mtype; 2349 fis_t fis; 2350 size_t amt; 2351 uint64_t lba; 2352 2353 xp = pwrk->xp; 2354 ASSERT(xp != NULL); 2355 2356 /* 2357 * First, see if this is just a plain read/write command. 2358 * If not, we have to queue it up for processing, block 2359 * any additional commands from coming in, and wake up 2360 * the thread that will process this command. 
2361 */ 2362 cdb_base = pkt->pkt_cdbp[0] & 0x1f; 2363 if (cdb_base != SCMD_READ && cdb_base != SCMD_WRITE) { 2364 pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL, 2365 "%s: special SATA cmd %p", __func__, (void *)sp); 2366 2367 ASSERT(xp->phy != NULL); 2368 pmcs_pwork(pwp, pwrk); 2369 pmcs_lock_phy(xp->phy); 2370 mutex_enter(&xp->statlock); 2371 xp->special_needed = 1; /* Set the special_needed flag */ 2372 STAILQ_INSERT_TAIL(&xp->sq, sp, cmd_next); 2373 if (pmcs_run_sata_special(pwp, xp)) { 2374 SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN); 2375 } 2376 mutex_exit(&xp->statlock); 2377 pmcs_unlock_phy(xp->phy); 2378 2379 return (PMCS_WQ_RUN_SUCCESS); 2380 } 2381 2382 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: regular cmd", __func__); 2383 2384 mutex_enter(&xp->statlock); 2385 if (!xp->assigned) { 2386 mutex_exit(&xp->statlock); 2387 return (PMCS_WQ_RUN_FAIL_OTHER); 2388 } 2389 if (xp->special_running || xp->special_needed || xp->recover_wait) { 2390 mutex_exit(&xp->statlock); 2391 mutex_enter(&xp->wqlock); 2392 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2393 mutex_exit(&xp->wqlock); 2394 /* 2395 * By the time we get here the special 2396 * commands running or waiting to be run 2397 * may have come and gone, so kick our 2398 * worker to run the waiting queues 2399 * just in case. 2400 */ 2401 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 2402 return (PMCS_WQ_RUN_FAIL_OTHER); 2403 } 2404 lba = xp->capacity; 2405 mutex_exit(&xp->statlock); 2406 2407 /* 2408 * Extract data length and lba parameters out of the command. The 2409 * function pmcs_SATA_rwparm returns a non-zero ASC value if the CDB 2410 * values are considered illegal. 
2411 */ 2412 asc = pmcs_SATA_rwparm(pkt->pkt_cdbp, &nblk, &lba, lba); 2413 if (asc) { 2414 uint8_t sns[18]; 2415 bzero(sns, sizeof (sns)); 2416 sns[0] = 0xf0; 2417 sns[2] = 0x5; 2418 sns[12] = asc; 2419 pmcs_latch_status(pwp, sp, STATUS_CHECK, sns, sizeof (sns), 2420 pwrk->phy->path); 2421 pmcs_pwork(pwp, pwrk); 2422 mutex_enter(&pwp->cq_lock); 2423 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2424 PMCS_CQ_RUN_LOCKED(pwp); 2425 mutex_exit(&pwp->cq_lock); 2426 return (PMCS_WQ_RUN_SUCCESS); 2427 } 2428 2429 /* 2430 * If the command decodes as not moving any data, complete it here. 2431 */ 2432 amt = nblk; 2433 amt <<= 9; 2434 amt = pmcs_set_resid(pkt, amt, nblk << 9); 2435 if (amt == 0) { 2436 pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, 2437 pwrk->phy->path); 2438 pmcs_pwork(pwp, pwrk); 2439 mutex_enter(&pwp->cq_lock); 2440 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2441 PMCS_CQ_RUN_LOCKED(pwp); 2442 mutex_exit(&pwp->cq_lock); 2443 return (PMCS_WQ_RUN_SUCCESS); 2444 } 2445 2446 /* 2447 * Get an inbound queue entry for this I/O 2448 */ 2449 GET_IO_IQ_ENTRY(pwp, ptr, xp->phy->device_id, iq); 2450 if (ptr == NULL) { 2451 /* 2452 * This is a temporary failure not likely to unblocked by 2453 * commands completing as the test for scheduling the 2454 * restart of work is a per-device test. 2455 */ 2456 mutex_enter(&xp->wqlock); 2457 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2458 mutex_exit(&xp->wqlock); 2459 pmcs_dma_unload(pwp, sp); 2460 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 2461 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, 2462 "%s: Failed to get IO IQ entry for tgt %d", 2463 __func__, xp->target_num); 2464 return (PMCS_WQ_RUN_FAIL_RES); 2465 } 2466 2467 /* 2468 * Get a tag. At this point, hold statlock until the tagmap is 2469 * updated (just prior to sending the cmd to the hardware). 
2470 */ 2471 mutex_enter(&xp->statlock); 2472 for (tag = 0; tag < xp->qdepth; tag++) { 2473 if ((xp->tagmap & (1 << tag)) == 0) { 2474 break; 2475 } 2476 } 2477 2478 if (tag == xp->qdepth) { 2479 mutex_exit(&xp->statlock); 2480 mutex_exit(&pwp->iqp_lock[iq]); 2481 mutex_enter(&xp->wqlock); 2482 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2483 mutex_exit(&xp->wqlock); 2484 return (PMCS_WQ_RUN_FAIL_OTHER); 2485 } 2486 2487 sp->cmd_satltag = (uint8_t)tag; 2488 2489 /* 2490 * Set up the command 2491 */ 2492 bzero(fis, sizeof (fis)); 2493 ptr[0] = 2494 LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SATA_HOST_IO_START)); 2495 ptr[1] = LE_32(pwrk->htag); 2496 ptr[2] = LE_32(pwrk->phy->device_id); 2497 ptr[3] = LE_32(amt); 2498 2499 if (xp->ncq) { 2500 mtype = SATA_PROTOCOL_FPDMA | (tag << 16); 2501 fis[0] = ((nblk & 0xff) << 24) | (C_BIT << 8) | FIS_REG_H2DEV; 2502 if (cdb_base == SCMD_READ) { 2503 fis[0] |= (READ_FPDMA_QUEUED << 16); 2504 } else { 2505 fis[0] |= (WRITE_FPDMA_QUEUED << 16); 2506 } 2507 fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff); 2508 fis[2] = ((nblk & 0xff00) << 16) | ((lba >> 24) & 0xffffff); 2509 fis[3] = tag << 3; 2510 } else { 2511 int op; 2512 fis[0] = (C_BIT << 8) | FIS_REG_H2DEV; 2513 if (xp->pio) { 2514 mtype = SATA_PROTOCOL_PIO; 2515 if (cdb_base == SCMD_READ) { 2516 op = READ_SECTORS_EXT; 2517 } else { 2518 op = WRITE_SECTORS_EXT; 2519 } 2520 } else { 2521 mtype = SATA_PROTOCOL_DMA; 2522 if (cdb_base == SCMD_READ) { 2523 op = READ_DMA_EXT; 2524 } else { 2525 op = WRITE_DMA_EXT; 2526 } 2527 } 2528 fis[0] |= (op << 16); 2529 fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff); 2530 fis[2] = (lba >> 24) & 0xffffff; 2531 fis[3] = nblk; 2532 } 2533 2534 if (cdb_base == SCMD_READ) { 2535 ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_INI); 2536 } else { 2537 ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_DEV); 2538 } 2539 #ifdef DEBUG 2540 /* 2541 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED 2542 * event when this goes out on the wire. 
2543 */ 2544 ptr[4] |= PMCIN_MESSAGE_REPORT; 2545 #endif 2546 for (i = 0; i < (sizeof (fis_t))/(sizeof (uint32_t)); i++) { 2547 ptr[i+5] = LE_32(fis[i]); 2548 } 2549 if (pmcs_dma_load(pwp, sp, ptr)) { 2550 mutex_exit(&xp->statlock); 2551 mutex_exit(&pwp->iqp_lock[iq]); 2552 mutex_enter(&xp->wqlock); 2553 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2554 mutex_exit(&xp->wqlock); 2555 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, 2556 "%s: Failed to dma_load for tgt %d", 2557 __func__, xp->target_num); 2558 return (PMCS_WQ_RUN_FAIL_RES); 2559 2560 } 2561 2562 pwrk->state = PMCS_WORK_STATE_ONCHIP; 2563 mutex_exit(&pwrk->lock); 2564 xp->tagmap |= (1 << tag); 2565 xp->actv_cnt++; 2566 if (xp->actv_cnt > xp->maxdepth) { 2567 xp->maxdepth = xp->actv_cnt; 2568 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp, 2569 "%s: max depth now %u", pwrk->phy->path, xp->maxdepth); 2570 } 2571 mutex_exit(&xp->statlock); 2572 mutex_enter(&xp->aqlock); 2573 STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next); 2574 mutex_exit(&xp->aqlock); 2575 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 2576 "%s: giving pkt %p to hardware", __func__, (void *)pkt); 2577 #ifdef DEBUG 2578 pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SATA INI Message", ptr); 2579 #endif 2580 INC_IQ_ENTRY(pwp, iq); 2581 2582 return (PMCS_WQ_RUN_SUCCESS); 2583 } 2584 2585 /* 2586 * Complete a SATA command. Called with pwrk lock held. 
 */
void
pmcs_SATA_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
{
	pmcs_cmd_t *sp = pwrk->arg;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_phy_t *pptr = pwrk->phy;
	int dead;
	uint32_t sts;
	pmcs_xscsi_t *xp;
	boolean_t aborted = B_FALSE;

	xp = pwrk->xp;
	ASSERT(xp != NULL);

	DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
	    (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
	    hrtime_t, gethrtime());

	dead = pwrk->dead;

	/* sts is the IOMB completion status; 0 if no message was supplied. */
	if (msg) {
		sts = LE_32(msg[2]);
	} else {
		sts = 0;
	}

	if (dead != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag "
		    "0x%x for %s", __func__, pwrk->htag, pptr->path);
		goto out;
	}
	if ((pwrk->state == PMCS_WORK_STATE_TIMED_OUT) &&
	    (sts != PMCOUT_STATUS_ABORTED)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: cmd 0x%p (tag 0x%x) timed out for %s",
		    __func__, (void *)sp, pwrk->htag, pptr->path);
		CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
		/* pkt_reason already set to CMD_TIMEOUT */
		ASSERT(CMD2PKT(sp)->pkt_reason == CMD_TIMEOUT);
		CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
		goto out;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, "%s: pkt %p tgt %u done",
	    __func__, (void *)pkt, xp->target_num);

	/*
	 * If the status isn't okay but not underflow,
	 * step to the side and parse the (possible) error.
	 */
#ifdef	DEBUG
	if (msg) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
	}
#endif
	if (!msg) {
		goto out;
	}

	/*
	 * If the status isn't okay or we got a FIS response of some kind,
	 * step to the side and parse the (possible) error.
	 */
	if ((sts != PMCOUT_STATUS_OK) || (LE_32(msg[3]) != 0)) {
		if (sts == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) {
			/*
			 * Device state is non-operational: attempt a link
			 * reset as error recovery. pwrk->lock must be
			 * dropped before taking the PHY lock (lock order),
			 * and is reacquired afterwards.
			 */
			mutex_exit(&pwrk->lock);
			pmcs_lock_phy(pptr);
			mutex_enter(&xp->statlock);
			if ((xp->resetting == 0) && (xp->reset_success != 0) &&
			    (xp->reset_wait == 0)) {
				mutex_exit(&xp->statlock);
				if (pmcs_reset_phy(pwp, pptr,
				    PMCS_PHYOP_LINK_RESET) != 0) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: PHY (%s) Local Control/Link "
					    "Reset FAILED as part of error "
					    "recovery", __func__, pptr->path);
				}
				mutex_enter(&xp->statlock);
			}
			mutex_exit(&xp->statlock);
			pmcs_unlock_phy(pptr);
			mutex_enter(&pwrk->lock);
		}
		pmcs_ioerror(pwp, SATA, pwrk, msg, sts);
	} else {
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
		    pwrk->phy->path);
		pkt->pkt_state |= STATE_XFERRED_DATA;
		pkt->pkt_resid = 0;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
	    "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
	    __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
	    pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);

	if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
		    __func__, (void *)pkt, pptr->path, (void *)pwrk);
		aborted = B_TRUE;
	}

out:
	pmcs_dma_unload(pwp, sp);
	mutex_enter(&xp->statlock);
	/* Release the SATL tag claimed in pmcs_SATA_run. */
	xp->tagmap &= ~(1 << sp->cmd_satltag);

	/*
	 * If the device no longer has a PHY pointer, clear the PHY pointer
	 * from the work structure before we free it. Otherwise, pmcs_pwork
	 * may decrement the ref_count on a PHY that's been freed.
	 */
	if (xp->phy == NULL) {
		pwrk->phy = NULL;
	}

	/*
	 * We may arrive here due to a command timing out, which in turn
	 * could be addressed in a different context. So, free the work
	 * back, but only after confirming it's not already been freed
	 * elsewhere.
	 */
	if (pwrk->htag != PMCS_TAG_FREE) {
		pmcs_pwork(pwp, pwrk);
	}

	/*
	 * Device is gone: complete the command now unless the work was
	 * marked dead (in which case it was already completed elsewhere).
	 */
	if (xp->dev_gone) {
		mutex_exit(&xp->statlock);
		if (!dead) {
			mutex_enter(&xp->aqlock);
			STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
			mutex_exit(&xp->aqlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq",
			    __func__, (void *)sp, sp->cmd_tag);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
			    "%s: Completing command for dead target 0x%p",
			    __func__, (void *)xp);
		}
		return;
	}

	/*
	 * Drop the active count. The last completion wakes drain waiters,
	 * or kicks the SATA worker if a special command is pending.
	 */
	ASSERT(xp->actv_cnt > 0);
	if (--(xp->actv_cnt) == 0) {
		if (xp->draining) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp,
			    "%s: waking up drain waiters", __func__);
			cv_signal(&pwp->drain_cv);
		} else if (xp->special_needed) {
			SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
		}
	}
	mutex_exit(&xp->statlock);

	/*
	 * If the status is other than OK, determine if it's something that
	 * is worth re-attempting enumeration. If so, mark the PHY.
2753 */ 2754 if (sts != PMCOUT_STATUS_OK) { 2755 pmcs_status_disposition(pptr, sts); 2756 } 2757 2758 if (dead == 0) { 2759 #ifdef DEBUG 2760 pmcs_cmd_t *wp; 2761 mutex_enter(&xp->aqlock); 2762 STAILQ_FOREACH(wp, &xp->aq, cmd_next) { 2763 if (wp == sp) { 2764 break; 2765 } 2766 } 2767 ASSERT(wp != NULL); 2768 #else 2769 mutex_enter(&xp->aqlock); 2770 #endif 2771 STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next); 2772 if (aborted) { 2773 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2774 "%s: Aborted cmd for tgt 0x%p, signaling waiters", 2775 __func__, (void *)xp); 2776 cv_signal(&xp->abort_cv); 2777 } 2778 mutex_exit(&xp->aqlock); 2779 mutex_enter(&pwp->cq_lock); 2780 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2781 PMCS_CQ_RUN_LOCKED(pwp); 2782 mutex_exit(&pwp->cq_lock); 2783 } 2784 } 2785 2786 static uint8_t 2787 pmcs_SATA_rwparm(uint8_t *cdb, uint32_t *xfr, uint64_t *lba, uint64_t lbamax) 2788 { 2789 uint8_t asc = 0; 2790 switch (cdb[0]) { 2791 case SCMD_READ_G5: 2792 case SCMD_WRITE_G5: 2793 *xfr = 2794 (((uint32_t)cdb[10]) << 24) | 2795 (((uint32_t)cdb[11]) << 16) | 2796 (((uint32_t)cdb[12]) << 8) | 2797 ((uint32_t)cdb[13]); 2798 *lba = 2799 (((uint64_t)cdb[2]) << 56) | 2800 (((uint64_t)cdb[3]) << 48) | 2801 (((uint64_t)cdb[4]) << 40) | 2802 (((uint64_t)cdb[5]) << 32) | 2803 (((uint64_t)cdb[6]) << 24) | 2804 (((uint64_t)cdb[7]) << 16) | 2805 (((uint64_t)cdb[8]) << 8) | 2806 ((uint64_t)cdb[9]); 2807 /* Check for illegal bits */ 2808 if (cdb[15]) { 2809 asc = 0x24; /* invalid field in cdb */ 2810 } 2811 break; 2812 case SCMD_READ_G4: 2813 case SCMD_WRITE_G4: 2814 *xfr = 2815 (((uint32_t)cdb[6]) << 16) | 2816 (((uint32_t)cdb[7]) << 8) | 2817 ((uint32_t)cdb[8]); 2818 *lba = 2819 (((uint32_t)cdb[2]) << 24) | 2820 (((uint32_t)cdb[3]) << 16) | 2821 (((uint32_t)cdb[4]) << 8) | 2822 ((uint32_t)cdb[5]); 2823 /* Check for illegal bits */ 2824 if (cdb[11]) { 2825 asc = 0x24; /* invalid field in cdb */ 2826 } 2827 break; 2828 case SCMD_READ_G1: 2829 case SCMD_WRITE_G1: 2830 *xfr = 
(((uint32_t)cdb[7]) << 8) | ((uint32_t)cdb[8]); 2831 *lba = 2832 (((uint32_t)cdb[2]) << 24) | 2833 (((uint32_t)cdb[3]) << 16) | 2834 (((uint32_t)cdb[4]) << 8) | 2835 ((uint32_t)cdb[5]); 2836 /* Check for illegal bits */ 2837 if (cdb[9]) { 2838 asc = 0x24; /* invalid field in cdb */ 2839 } 2840 break; 2841 case SCMD_READ: 2842 case SCMD_WRITE: 2843 *xfr = cdb[4]; 2844 if (*xfr == 0) { 2845 *xfr = 256; 2846 } 2847 *lba = 2848 (((uint32_t)cdb[1] & 0x1f) << 16) | 2849 (((uint32_t)cdb[2]) << 8) | 2850 ((uint32_t)cdb[3]); 2851 /* Check for illegal bits */ 2852 if (cdb[5]) { 2853 asc = 0x24; /* invalid field in cdb */ 2854 } 2855 break; 2856 } 2857 2858 if (asc == 0) { 2859 if ((*lba + *xfr) > lbamax) { 2860 asc = 0x21; /* logical block out of range */ 2861 } 2862 } 2863 return (asc); 2864 } 2865 2866 /* 2867 * Called with pwrk lock held. 2868 */ 2869 static void 2870 pmcs_ioerror(pmcs_hw_t *pwp, pmcs_dtype_t t, pmcwork_t *pwrk, uint32_t *w, 2871 uint32_t status) 2872 { 2873 static uint8_t por[] = { 2874 0xf0, 0x0, 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x28 2875 }; 2876 static uint8_t parity[] = { 2877 0xf0, 0x0, 0xb, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x47, 5 2878 }; 2879 const char *msg; 2880 char buf[20]; 2881 pmcs_cmd_t *sp = pwrk->arg; 2882 pmcs_phy_t *phyp = pwrk->phy; 2883 struct scsi_pkt *pkt = CMD2PKT(sp); 2884 uint32_t resid; 2885 2886 ASSERT(w != NULL); 2887 resid = LE_32(w[3]); 2888 2889 msg = pmcs_status_str(status); 2890 if (msg == NULL) { 2891 (void) snprintf(buf, sizeof (buf), "Error 0x%x", status); 2892 msg = buf; 2893 } 2894 2895 if (status != PMCOUT_STATUS_OK) { 2896 pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, NULL, 2897 "%s: device %s tag 0x%x status %s @ %llu", __func__, 2898 phyp->path, pwrk->htag, msg, 2899 (unsigned long long)gethrtime()); 2900 } 2901 2902 pkt->pkt_reason = CMD_CMPLT; /* default reason */ 2903 2904 switch (status) { 2905 case PMCOUT_STATUS_OK: 2906 if (t == SATA) { 2907 int i; 2908 fis_t fis; 2909 for (i = 0; i < sizeof (fis) / sizeof (fis[0]); i++) { 
				fis[i] = LE_32(w[4+i]);
			}
			if ((fis[0] & 0xff) != FIS_REG_D2H) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "unexpected fis code 0x%x", fis[0] & 0xff);
			} else {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "FIS ERROR");
				pmcs_fis_dump(pwp, fis);
			}
			/* SATA "OK" with a response FIS: transport error. */
			pkt->pkt_reason = CMD_TRAN_ERR;
			break;
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_ABORTED:
		/*
		 * Command successfully aborted.
		 */
		if (phyp->dead) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->ssp_event != 0) {
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
			pkt->pkt_reason = CMD_TIMEOUT;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		} else {
			pkt->pkt_reason = CMD_ABORTED;
			pkt->pkt_statistics |= STAT_ABORTED;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		}

		/*
		 * PMCS_WORK_STATE_TIMED_OUT doesn't need to be preserved past
		 * this point, so go ahead and mark it as aborted.
		 */
		pwrk->state = PMCS_WORK_STATE_ABORTED;
		break;

	case PMCOUT_STATUS_UNDERFLOW:
		/*
		 * This will only get called for SATA
		 */
		pkt->pkt_resid = resid;
		if (pkt->pkt_dma_len < pkt->pkt_resid) {
			(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, resid);
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_NO_DEVICE:
	case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
		pkt->pkt_reason = CMD_DEV_GONE;
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
		/*
		 * Need to do rediscovery. We probably have
		 * the wrong device (disk swap), so kill
		 * this one.
		 */
	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
		/*
		 * Need to do rediscovery.
		 */
		if (!phyp->dead) {
			/*
			 * pwrk->lock is dropped around the PHY lock to
			 * honor lock ordering, then reacquired.
			 */
			mutex_exit(&pwrk->lock);
			pmcs_lock_phy(pwrk->phy);
			pmcs_kill_changed(pwp, pwrk->phy, 0);
			pmcs_unlock_phy(pwrk->phy);
			mutex_enter(&pwrk->lock);
			pkt->pkt_reason = CMD_INCOMPLETE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else {
			pkt->pkt_reason = CMD_DEV_GONE;
		}
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
		/* cmd is pending on the target */
	case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
	case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
		/* transitory - commands sent while in NCQ failure mode */
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
		/* NCQ failure */
	case PMCOUT_STATUS_IO_PORT_IN_RESET:
	case PMCOUT_STATUS_XFER_ERR_BREAK:
	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state = STATE_GOT_BUS;
		break;

	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
		/* synthesize a BUSY status so the target retries */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "STATUS_BUSY for htag 0x%08x", sp->cmd_tag);
		pmcs_latch_status(pwp, sp, STATUS_BUSY, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		/* synthesize a RESERVATION CONFLICT */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "%s: Potential affiliation active on 0x%" PRIx64, __func__,
		    pmcs_barray2wwn(phyp->sas_address));
		pmcs_latch_status(pwp, sp, STATUS_RESERVATION_CONFLICT, NULL,
		    0, phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
		/* synthesize a power-on/reset */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, por, sizeof (por),
		    phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
	case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
	case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
		/* synthesize a PARITY ERROR */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, parity,
		    sizeof (parity), phyp->path);
		break;

	case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
	case PMCOUT_STATUS_IO_NOT_VALID:
	case PMCOUT_STATUS_PROG_ERROR:
	case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
	case PMCOUT_STATUS_XFER_ERROR_SATA: /* non-NCQ failure */
	default:
		pkt->pkt_reason = CMD_TRAN_ERR;
		break;
	}
}

/*
 * Latch up SCSI status
 */

void
pmcs_latch_status(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint8_t status,
    uint8_t *snsp, size_t snslen, char *path)
{
	static const char c1[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x (%02x %02x %02x) "
	    "HTAG 0x%x @ %llu";
	static const char c2[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x HTAG 0x%x @ %llu";

	CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_GOT_STATUS;
	CMD2PKT(sp)->pkt_scbp[0] = status;

	/*
	 * For CHECK CONDITION with sense data and room for an auto-request-
	 * sense structure, fill in the ARQ status in the packet.
	 */
	if (status == STATUS_CHECK && snsp &&
	    (size_t)SCSA_STSLEN(sp) >= sizeof (struct scsi_arq_status)) {
		struct scsi_arq_status *aqp =
		    (void *) CMD2PKT(sp)->pkt_scbp;
		size_t amt = sizeof (struct scsi_extended_sense);
		uint8_t key = scsi_sense_key(snsp);
		uint8_t asc = scsi_sense_asc(snsp);
		uint8_t ascq = scsi_sense_ascq(snsp);
		if (amt > snslen) {
			amt = snslen;
		}
		pmcs_prt(pwp,
		    PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c1, path,
		    status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff, key, asc, ascq,
		    sp->cmd_tag, (unsigned long long)gethrtime());
		CMD2PKT(sp)->pkt_state |= STATE_ARQ_DONE;
		/* The synthesized request-sense "packet" completed cleanly. */
		(*(uint8_t *)&aqp->sts_rqpkt_status) = STATUS_GOOD;
		aqp->sts_rqpkt_statistics = 0;
		aqp->sts_rqpkt_reason = CMD_CMPLT;
		aqp->sts_rqpkt_state = STATE_GOT_BUS |
		    STATE_GOT_TARGET | STATE_SENT_CMD |
		    STATE_XFERRED_DATA | STATE_GOT_STATUS;
		(void) memcpy(&aqp->sts_sensedata, snsp, amt);
		if (aqp->sts_sensedata.es_class != CLASS_EXTENDED_SENSE) {
			/* Not extended sense: report a transport error. */
			aqp->sts_rqpkt_reason = CMD_TRAN_ERR;
			aqp->sts_rqpkt_state = 0;
			aqp->sts_rqpkt_resid =
			    sizeof (struct scsi_extended_sense);
		} else {
			aqp->sts_rqpkt_resid =
			    sizeof (struct scsi_extended_sense) - amt;
		}
	} else if (status) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c2,
		    path, status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff,
		    sp->cmd_tag, (unsigned long long)gethrtime());
	}

	CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
}

/*
 * Calculate and set packet residual and return the amount
 * left over after applying various filters.
 *
 * pkt:    the packet; pkt_resid is set to cdbamt
 * amt:    candidate byte count
 * cdbamt: byte count the CDB asked for
 *
 * Returns amt clamped to both pkt_resid (i.e. cdbamt) and pkt_dma_len.
 */
size_t
pmcs_set_resid(struct scsi_pkt *pkt, size_t amt, uint32_t cdbamt)
{
	pkt->pkt_resid = cdbamt;
	if (amt > pkt->pkt_resid) {
		amt = pkt->pkt_resid;
	}
	if (amt > pkt->pkt_dma_len) {
		amt = pkt->pkt_dma_len;
	}
	return (amt);
}

/*
 * Return the existing target softstate (unlocked) if there is one. If so,
 * the PHY is locked and that lock must be freed by the caller after the
 * target/PHY linkage is established. If there isn't one, and alloc_tgt is
 * TRUE, then allocate one.
 */
pmcs_xscsi_t *
pmcs_get_target(pmcs_iport_t *iport, char *tgt_port, boolean_t alloc_tgt)
{
	pmcs_hw_t *pwp = iport->pwp;
	pmcs_phy_t *phyp;
	pmcs_xscsi_t *tgt;
	uint64_t wwn;
	char unit_address[PMCS_MAX_UA_SIZE];
	int ua_form = 1;

	/*
	 * Find the PHY for this target
	 */
	phyp = pmcs_find_phy_by_sas_address(pwp, iport, NULL, tgt_port);
	if (phyp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
		    "%s: No PHY for target @ %s", __func__, tgt_port);
		return (NULL);
	}

	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);

	if (tgt) {
		mutex_enter(&tgt->statlock);
		/*
		 * There's already a target. Check its PHY pointer to see
		 * if we need to clear the old linkages
		 */
		if (tgt->phy && (tgt->phy != phyp)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
			    "%s: Target PHY updated from %p to %p", __func__,
			    (void *)tgt->phy, (void *)phyp);
			if (!IS_ROOT_PHY(tgt->phy)) {
				/* Move the ref from the old PHY to the new. */
				pmcs_dec_phy_ref_count(tgt->phy);
				pmcs_inc_phy_ref_count(phyp);
			}
			tgt->phy->target = NULL;
		}

		/*
		 * If this target has no PHY pointer and alloc_tgt is FALSE,
		 * that implies we expect the target to already exist. This
		 * implies that there has already been a tran_tgt_init on at
		 * least one LU.
		 */
		if ((tgt->phy == NULL) && !alloc_tgt) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, tgt,
			    "%s: Establish linkage from new PHY to old target @"
			    "%s", __func__, tgt->unit_address);
			/* One PHY reference per existing LU reference. */
			for (int idx = 0; idx < tgt->ref_count; idx++) {
				pmcs_inc_phy_ref_count(phyp);
			}
		}

		/*
		 * Re-establish the linkage. Returns with the PHY still
		 * locked, per the contract in the block comment above.
		 */
		tgt->phy = phyp;
		phyp->target = tgt;

		mutex_exit(&tgt->statlock);
		return (tgt);
	}

	/*
	 * Make sure the PHY we found is on the correct iport
	 */
	if (phyp->iport != iport) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
		    "%s: No target at %s on this iport", __func__, tgt_port);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	/*
	 * If this was just a lookup (i.e. alloc_tgt is false), return now.
	 */
	if (alloc_tgt == B_FALSE) {
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	/*
	 * Allocate the new softstate
	 */
	wwn = pmcs_barray2wwn(phyp->sas_address);
	(void) scsi_wwn_to_wwnstr(wwn, ua_form, unit_address);

	if (ddi_soft_state_bystr_zalloc(iport->tgt_sstate, unit_address) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: Couldn't alloc softstate for device at %s",
		    __func__, unit_address);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, unit_address);
	ASSERT(tgt != NULL);
	/* Initialize queues, locks and CVs for the new target. */
	STAILQ_INIT(&tgt->wq);
	STAILQ_INIT(&tgt->aq);
	STAILQ_INIT(&tgt->sq);
	mutex_init(&tgt->statlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	mutex_init(&tgt->wqlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	mutex_init(&tgt->aqlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&tgt->reset_cv, NULL, CV_DRIVER, NULL);
	cv_init(&tgt->abort_cv, NULL, CV_DRIVER, NULL);
	list_create(&tgt->lun_list, sizeof (pmcs_lun_t),
	    offsetof(pmcs_lun_t, lun_list_next));
	tgt->qdepth = 1;
	tgt->target_num = PMCS_INVALID_TARGET_NUM;
	bcopy(unit_address, tgt->unit_address, PMCS_MAX_UA_SIZE);
	tgt->pwp = pwp;
	tgt->ua = strdup(iport->ua);
	tgt->phy = phyp;
	ASSERT((phyp->target == NULL) || (phyp->target == tgt));
	if (phyp->target == NULL) {
		phyp->target = tgt;
	}

	/*
	 * Don't allocate LUN softstate for SMP targets
	 */
	if (phyp->dtype == EXPANDER) {
		return (tgt);
	}

	if (ddi_soft_state_bystr_init(&tgt->lun_sstate,
	    sizeof (pmcs_lun_t), PMCS_LUN_SSTATE_SZ) != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: LUN soft_state_bystr_init failed", __func__);
		/*
		 * NOTE(review): the softstate was allocated under the
		 * unit_address key but is freed here under tgt_port; both
		 * presumably render the same SAS-address string — confirm.
		 */
		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	return (tgt);
}