/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright 2008 QLogic Corporation */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"Copyright 2008 QLogic Corporation; ql_isr.c"

/*
 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
 *
 * ***********************************************************************
 * *									**
 * *				NOTICE					**
 * *		COPYRIGHT (C) 1996-2008 QLOGIC CORPORATION		**
 * *			ALL RIGHTS RESERVED				**
 * *									**
 * ***********************************************************************
 *
 */

#include <ql_apps.h>
#include <ql_api.h>
#include <ql_debug.h>
#include <ql_iocb.h>
#include <ql_isr.h>
#include <ql_init.h>
#include <ql_mbx.h>
#include <ql_xioctl.h>

/*
 * Local Function Prototypes.
 */
static void ql_spurious_intr(ql_adapter_state_t *, int);
static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *,
    uint32_t *, int);
static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *,
    uint32_t *, uint32_t *, int);
static void ql_fast_fcp_post(ql_srb_t *);
static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *,
    uint32_t *, int);
static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *,
    uint32_t *, uint32_t *);
static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *,
    uint32_t *, uint32_t *);
static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *,
    ql_head_t *, uint32_t *, uint32_t *);
static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *,
    ql_head_t *, uint32_t *, uint32_t *);
static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *,
    ql_head_t *, uint32_t *, uint32_t *);
static void ql_immediate_notify_entry(ql_adapter_state_t *,
    immediate_notify_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
static void ql_notify_acknowledge_entry(ql_adapter_state_t *,
    notify_acknowledge_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
static void ql_accept_target_io_entry(ql_adapter_state_t *,
    atio_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
static void ql_continue_target_io_entry(ql_adapter_state_t *,
    ctio_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *,
    uint32_t *, uint32_t *);
static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *,
    ql_head_t *, uint32_t *, uint32_t *);
static void ql_ip_rcv_cont_entry(ql_adapter_state_t *,
    ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *,
    ql_head_t *, uint32_t *, uint32_t *);
static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *,
    uint32_t *, uint32_t *);
static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *,
    ql_head_t *, uint32_t *, uint32_t *);

/* TODO: temporary define until defined in LV */
#ifndef	FC_STATE_8GBIT_SPEED
#define	FC_STATE_8GBIT_SPEED		0x0700	/* 8 Gbit/sec */
#endif

/*
 * ql_isr
 *	Process all INTX intr types.
 *
 * Input:
 *	arg1:	adapter state pointer.
 *
 * Returns:
 *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
uint_t
ql_isr(caddr_t arg1)
{
	/* Legacy INTX entry point; vector 0 is the only vector. */
	return (ql_isr_aif(arg1, 0));
}

/*
 * ql_isr_default
 *	Process unknown/unvectored intr types
 *
 * Input:
 *	arg1:	adapter state pointer.
 *	arg2:	interrupt vector.
 *
 * Returns:
 *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
uint_t
ql_isr_default(caddr_t arg1, caddr_t arg2)
{
	ql_adapter_state_t	*ha = (void *)arg1;

	/*
	 * NOTE(review): arg2 is a caddr_t (pointer) formatted with %x here;
	 * verify the EL() macro's varargs handling for pointer-width
	 * arguments on 64-bit kernels.
	 */
	EL(ha, "isr_default called: idx=%x\n", arg2);
	return (ql_isr_aif(arg1, arg2));
}

/*
 * ql_isr_aif
 *	Process mailbox and I/O command completions.
 *
 * Input:
 *	arg:	adapter state pointer.
 *	intvec:	interrupt vector.
 *
 * Returns:
 *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
uint_t
ql_isr_aif(caddr_t arg, caddr_t intvec)
{
	uint16_t		mbx;
	uint32_t		stat;
	ql_adapter_state_t	*ha = (void *)arg;
	uint32_t		set_flags = 0;
	uint32_t		reset_flags = 0;
	ql_head_t		isr_done_q = {NULL, NULL};
	uint_t			rval = DDI_INTR_UNCLAIMED;
	int			spurious_intr = 0;
	boolean_t		intr = B_FALSE, daemon = B_FALSE;
	/*
	 * Countdown of loop passes in which the called handlers may perform
	 * the RISC interrupt clear themselves (passed as intr_clr); once it
	 * reaches zero this loop force-clears after each pass.
	 */
	int			intr_loop = 4;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		/*
		 * Looks like we are about to go down soon, exit early.
		 */
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
		return (DDI_INTR_UNCLAIMED);
	}
	ha->busy++;
	QL_PM_UNLOCK(ha);

	/* Acquire interrupt lock. */
	INTR_LOCK(ha);

	if (CFG_IST(ha, CFG_CTRL_2200)) {
		/* ISP2200 path: poll istatus until RISC_INT deasserts. */
		while (RD16_IO_REG(ha, istatus) & RISC_INT) {
			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;
			if (intr_loop) {
				intr_loop--;
			}

			/* Special Fast Post 2200. */
			stat = 0;
			if (ha->task_daemon_flags & FIRMWARE_LOADED &&
			    ha->flags & ONLINE) {
				ql_srb_t *sp;

				mbx = RD16_IO_REG(ha, mailbox[23]);

				if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					if (intr_loop) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
					}

					/* Get handle. */
					mbx >>= 4;
					stat = mbx & OSC_INDEX_MASK;

					/* Validate handle. */
					sp = stat < MAX_OUTSTANDING_COMMANDS ?
					    ha->outstanding_cmds[stat] : NULL;

					if (sp != NULL && (sp->handle & 0xfff)
					    == mbx) {
						ha->outstanding_cmds[stat] =
						    NULL;
						sp->handle = 0;
						sp->flags &=
						    ~SRB_IN_TOKEN_ARRAY;

						/* Set completed status. */
						sp->flags |= SRB_ISP_COMPLETED;

						/* Set completion status */
						sp->pkt->pkt_reason =
						    CS_COMPLETE;

						ql_fast_fcp_post(sp);
					} else if (mbx !=
					    (QL_FCA_BRAND & 0xfff)) {
						if (sp == NULL) {
							EL(ha, "unknown IOCB"
							    " handle=%xh\n",
							    mbx);
						} else {
							EL(ha, "mismatch IOCB"
							    " handle pkt=%xh, "
							    "sp=%xh\n", mbx,
							    sp->handle & 0xfff);
						}

						(void) ql_binary_fw_dump(ha,
						    FALSE);

						if (!(ha->task_daemon_flags &
						    (ISP_ABORT_NEEDED |
						    ABORT_ISP_ACTIVE))) {
							EL(ha, "ISP Invalid "
							    "handle, "
							    "isp_abort_needed"
							    "\n");
							set_flags |=
							    ISP_ABORT_NEEDED;
						}
					}
				}
			}

			if (stat == 0) {
				/* Check for mailbox interrupt. */
				mbx = RD16_IO_REG(ha, semaphore);
				if (mbx & BIT_0) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					/* Get mailbox data. */
					mbx = RD16_IO_REG(ha, mailbox[0]);
					if (mbx > 0x3fff && mbx < 0x8000) {
						/* 4000-7fff: mbx command. */
						ql_mbx_completion(ha, mbx,
						    &set_flags, &reset_flags,
						    intr_loop);
					} else if (mbx > 0x7fff &&
					    mbx < 0xc000) {
						/* 8000-bfff: async event. */
						ql_async_event(ha, mbx,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else {
						EL(ha, "UNKNOWN interrupt "
						    "type\n");
						intr = B_TRUE;
					}
				} else {
					ha->isp_rsp_index = RD16_IO_REG(ha,
					    resp_in);

					if (ha->isp_rsp_index !=
					    ha->rsp_ring_index) {
						ql_response_pkt(ha,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else if (++spurious_intr ==
					    MAX_SPURIOUS_INTR) {
						/*
						 * Process excessive
						 * spurious interrupts
						 */
						ql_spurious_intr(ha,
						    intr_loop);
						EL(ha, "excessive spurious "
						    "interrupts, "
						    "isp_abort_needed\n");
						set_flags |= ISP_ABORT_NEEDED;
					} else {
						intr = B_TRUE;
					}
				}
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			}

			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}
		}
	} else {
		/* ISP23xx/63xx/24xx/25xx path: decode intr_info_lo status. */
		while ((stat = RD32_IO_REG(ha, intr_info_lo)) & BIT_15) {
			mbx = MSW(stat);

			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;
			if (intr_loop) {
				intr_loop--;
			}

			switch (stat & 0x1ff) {
			case ROM_MBX_SUCCESS:
			case ROM_MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);

				/* Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				break;

			case MBX_SUCCESS:
			case MBX_ERR:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);
				break;

			case ASYNC_EVENT:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case RESP_UPDATE:
				if (mbx != ha->rsp_ring_index) {
					ha->isp_rsp_index = mbx;
					ql_response_pkt(ha, &isr_done_q,
					    &set_flags, &reset_flags,
					    intr_loop);
				} else if (++spurious_intr ==
				    MAX_SPURIOUS_INTR) {
					/* Process excessive spurious intr. */
					ql_spurious_intr(ha, intr_loop);
					EL(ha, "excessive spurious "
					    "interrupts, isp_abort_needed\n");
					set_flags |= ISP_ABORT_NEEDED;
				} else {
					intr = B_TRUE;
				}
				break;

			/*
			 * Fast-post cases: rewrite the low word of stat to
			 * the matching async-event code and let
			 * ql_async_event() do the completion work.
			 */
			case SCSI_FAST_POST_16:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case SCSI_FAST_POST_32:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case CTIO_FAST_POST:
				stat = (stat & 0xffff0000) |
				    MBA_CTIO_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_XMT:
				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV:
				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_BRD:
				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV_ALN:
				stat = (stat & 0xffff0000) |
				    MBA_IP_HDR_DATA_SPLIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case ATIO_UPDATE:
				EL(ha, "unsupported ATIO queue update"
				    " interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			case ATIO_RESP_UPDATE:
				EL(ha, "unsupported ATIO response queue "
				    "update interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			default:
				/* Check for internal parity/pause error. */
				mbx = RD16_IO_REG(ha, hccr);
				if (stat & BIT_8 ||
				    mbx & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) {

					ADAPTER_STATE_LOCK(ha);
					ha->flags |= PARITY_ERROR;
					ADAPTER_STATE_UNLOCK(ha);

					/* Only warn on new/changed errors. */
					if (ha->parity_pause_errors == 0 ||
					    ha->parity_hccr_err != mbx ||
					    ha->parity_stat_err != stat) {
						cmn_err(CE_WARN, "qlc(%d): "
						    "isr, Internal Parity/"
						    "Pause Error - hccr=%xh, "
						    "stat=%xh, count=%d",
						    ha->instance, mbx, stat,
						    ha->parity_pause_errors);
						ha->parity_hccr_err = mbx;
						ha->parity_stat_err = stat;
					}

					EL(ha, "parity/pause error, "
					    "isp_abort_needed\n");

					if (ql_binary_fw_dump(ha, FALSE) !=
					    QL_SUCCESS) {
						/* Reset ISP chip. */
						ql_reset_chip(ha);
					}

					if (ha->parity_pause_errors == 0) {
						(void) ql_flash_errlog(ha,
						    FLASH_ERRLOG_PARITY_ERR, 0,
						    MSW(stat), LSW(stat));
					}

					/* Saturating error counter. */
					if (ha->parity_pause_errors <
					    0xffffffff) {
						ha->parity_pause_errors++;
					}

					set_flags |= ISP_ABORT_NEEDED;

					/* Disable ISP interrupts. */
					WRT16_IO_REG(ha, ictrl, 0);
					ADAPTER_STATE_LOCK(ha);
					ha->flags &= ~INTERRUPTS_ENABLED;
					ADAPTER_STATE_UNLOCK(ha);
				} else {
					EL(ha, "UNKNOWN interrupt status=%xh,"
					    " hccr=%xh\n", stat, mbx);
				}
				intr = B_TRUE;
				break;
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				CFG_IST(ha, CFG_CTRL_2425) ?
				    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
				    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			}

			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}

			if (ha->flags & PARITY_ERROR) {
				EL(ha, "parity/pause exit\n");
				mbx = RD16_IO_REG(ha, hccr);	/* PCI posting */
				break;
			}
		}
	}

	/* Process claimed interrupts during polls. */
	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
		ha->intr_claimed = B_FALSE;
		rval = DDI_INTR_CLAIMED;
	}

	/* Release interrupt lock. */
	INTR_UNLOCK(ha);

	if (daemon) {
		ql_awaken_task_daemon(ha, NULL, 0, 0);
	}

	if (isr_done_q.first != NULL) {
		ql_done(isr_done_q.first);
	}

	if (rval == DDI_INTR_CLAIMED) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->TotalInterrupts++;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
		    ha->instance);
	}

	QL_PM_LOCK(ha);
	ha->busy--;
	QL_PM_UNLOCK(ha);

	return (rval);
}

/*
 * ql_spurious_intr
 *	Inform Solaris of spurious interrupts.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	intr_clr:	early interrupt clear
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
{
	ddi_devstate_t	state;

	EL(ha, "Spurious interrupt\n");

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Clear RISC interrupt */
	if (intr_clr) {
		CFG_IST(ha, CFG_CTRL_2425) ?
		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
	}

	/* Report the fault only if the device is otherwise healthy. */
	state = ddi_get_devstate(ha->dip);
	if (state == DDI_DEVSTATE_UP) {
		/*EMPTY*/
		ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
		    DDI_DEVICE_FAULT, "spurious interrupts");
	}
}

/*
 * ql_mbx_completion
 *	Processes mailbox completions.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	mb0:		Mailbox 0 contents.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *	intr_clr:	early interrupt clear
 *
 * Context:
 *	Interrupt context.
 */
/* ARGSUSED */
static void
ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
    uint32_t *reset_flags, int intr_clr)
{
	uint32_t	index;
	uint16_t	cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Load return mailbox registers. */
	MBX_REGISTER_LOCK(ha);

	if (ha->mcp != NULL) {
		ha->mcp->mb[0] = mb0;
		/* in_mb is a bitmask of mailbox registers to read back. */
		index = ha->mcp->in_mb & ~MBX_0;

		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
			index >>= 1;
			if (index & MBX_0) {
				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
				    mailbox[cnt]);
			}
		}

	} else {
		EL(ha, "mcp == NULL\n");
	}

	if (intr_clr) {
		/* Clear RISC interrupt. */
		CFG_IST(ha, CFG_CTRL_2425) ?
		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
	}

	/* Wake any thread waiting on the mailbox command to complete. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
	if (ha->flags & INTERRUPTS_ENABLED) {
		cv_broadcast(&ha->cv_mbx_intr);
	}

	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_async_event
 *	Processes asynchronous events.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	mbx:		Mailbox 0 register.
 *	done_q:		head pointer to done queue.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *	intr_clr:	early interrupt clear
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
{
	uint32_t		handle;
	uint32_t		index;
	uint16_t		cnt;
	uint16_t		mb[MAX_MBOX_COUNT];
	ql_srb_t		*sp;
	port_id_t		s_id;
	ql_tgt_t		*tq;
	boolean_t		intr = B_TRUE;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Setup to process fast completion.
	 * Fast-post variants differ only in where the IOCB handle comes
	 * from; normalize them all to MBA_SCSI_COMPLETION.
	 */
	mb[0] = LSW(mbx);
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox[1]),
		    RD16_IO_REG(ha, mailbox[2]));
		break;

	case MBA_CMPLT_1_16BIT:
		handle = MSW(mbx);
		mb[0] = MBA_SCSI_COMPLETION;
		break;

	case MBA_CMPLT_1_32BIT:
		handle = SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
		mb[0] = MBA_SCSI_COMPLETION;
		break;

	case MBA_CTIO_COMPLETION:
	case MBA_IP_COMPLETION:
		handle = CFG_IST(ha, CFG_CTRL_2200) ? SHORT_TO_LONG(
		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2])) :
		    SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
		mb[0] = MBA_SCSI_COMPLETION;
		break;

	default:
		break;
	}

	/* Handle asynchronous event */
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n",
		    ha->instance);

		if (intr_clr) {
			/* Clear RISC interrupt */
			CFG_IST(ha, CFG_CTRL_2425) ?
			    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
			    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			intr = B_FALSE;
		}

		if ((ha->flags & ONLINE) == 0) {
			break;
		}

		/* Get handle. */
		index = handle & OSC_INDEX_MASK;

		/* Validate handle. */
		sp = index < MAX_OUTSTANDING_COMMANDS ?
		    ha->outstanding_cmds[index] : NULL;

		if (sp != NULL && sp->handle == handle) {
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* Set completed status. */
			sp->flags |= SRB_ISP_COMPLETED;

			/* Set completion status */
			sp->pkt->pkt_reason = CS_COMPLETE;

			if (!(sp->flags & SRB_FCP_CMD_PKT)) {
				/* Place block on done queue */
				ql_add_link_b(done_q, &sp->cmd);
			} else {
				ql_fast_fcp_post(sp);
			}
		} else if (handle != QL_FCA_BRAND) {
			if (sp == NULL) {
				EL(ha, "%xh unknown IOCB handle=%xh\n",
				    mb[0], handle);
			} else {
				EL(ha, "%xh mismatch IOCB handle pkt=%xh, "
				    "sp=%xh\n", mb[0], handle, sp->handle);
			}

			EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh,"
			    "mbx6=%xh, mbx7=%xh\n", mb[0],
			    RD16_IO_REG(ha, mailbox[1]),
			    RD16_IO_REG(ha, mailbox[2]),
			    RD16_IO_REG(ha, mailbox[3]),
			    RD16_IO_REG(ha, mailbox[6]),
			    RD16_IO_REG(ha, mailbox[7]));

			(void) ql_binary_fw_dump(ha, FALSE);

			if (!(ha->task_daemon_flags &
			    (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
				EL(ha, "%xh ISP Invalid handle, "
				    "isp_abort_needed\n", mb[0]);
				*set_flags |= ISP_ABORT_NEEDED;
			}
		}
		break;

	case MBA_RESET:		/* Reset */
		EL(ha, "%xh Reset received\n", mb[0]);
		*set_flags |= RESET_MARKER_NEEDED;
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mb[1] = RD16_IO_REG(ha, mailbox[1]);
		mb[2] = RD16_IO_REG(ha, mailbox[2]);
		mb[3] = RD16_IO_REG(ha, mailbox[3]);

		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
		    "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
		    "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
		    "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
		    RD16_IO_REG(ha, mailbox[4]), RD16_IO_REG(ha, mailbox[5]),
		    RD16_IO_REG(ha, mailbox[6]), RD16_IO_REG(ha, mailbox[7]),
		    RD16_IO_REG(ha, mailbox[8]), RD16_IO_REG(ha, mailbox[9]),
		    RD16_IO_REG(ha, mailbox[10]), RD16_IO_REG(ha, mailbox[11]),
		    RD16_IO_REG(ha, mailbox[12]));

		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
		    "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
		    "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
		    mb[0], RD16_IO_REG(ha, mailbox[13]),
		    RD16_IO_REG(ha, mailbox[14]), RD16_IO_REG(ha, mailbox[15]),
		    RD16_IO_REG(ha, mailbox[16]), RD16_IO_REG(ha, mailbox[17]),
		    RD16_IO_REG(ha, mailbox[18]), RD16_IO_REG(ha, mailbox[19]),
		    RD16_IO_REG(ha, mailbox[20]), RD16_IO_REG(ha, mailbox[21]),
		    RD16_IO_REG(ha, mailbox[22]),
		    RD16_IO_REG(ha, mailbox[23]));

		if (ha->reg_off->mbox_cnt > 24) {
			EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
			    "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
			    "mbx30=%xh, mbx31=%xh\n", mb[0],
			    RD16_IO_REG(ha, mailbox[24]),
			    RD16_IO_REG(ha, mailbox[25]),
			    RD16_IO_REG(ha, mailbox[26]),
			    RD16_IO_REG(ha, mailbox[27]),
			    RD16_IO_REG(ha, mailbox[28]),
			    RD16_IO_REG(ha, mailbox[29]),
			    RD16_IO_REG(ha, mailbox[30]),
			    RD16_IO_REG(ha, mailbox[31]));
		}

		(void) ql_binary_fw_dump(ha, FALSE);

		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1],
		    mb[2], mb[3]);

		*set_flags |= ISP_ABORT_NEEDED;
		ha->xioctl->ControllerErrorCount++;
		break;

	case MBA_REQ_TRANSFER_ERR:  /* Request Transfer Error */
		EL(ha, "%xh Request Transfer Error received, "
		    "isp_abort_needed\n", mb[0]);

		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003,
		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
		    RD16_IO_REG(ha, mailbox[3]));

		*set_flags |= ISP_ABORT_NEEDED;
		ha->xioctl->ControllerErrorCount++;
		break;

	case MBA_RSP_TRANSFER_ERR:  /* Response Xfer Err */
		EL(ha, "%xh Response Transfer Error received,"
		    " isp_abort_needed\n", mb[0]);

		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004,
		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
		    RD16_IO_REG(ha, mailbox[3]));

		*set_flags |= ISP_ABORT_NEEDED;
		ha->xioctl->ControllerErrorCount++;
		break;

	case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
		EL(ha, "%xh Request Queue Wake-up received\n",
		    mb[0]);
		break;

	case MBA_MENLO_ALERT:	/* Menlo Alert Notification */
		mb[1] = RD16_IO_REG(ha, mailbox[1]);
		mb[2] = RD16_IO_REG(ha, mailbox[2]);
		mb[3] = RD16_IO_REG(ha, mailbox[3]);

		EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
		    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);

		switch (mb[1]) {
		case MLA_LOGIN_OPERATIONAL_FW:
			ADAPTER_STATE_LOCK(ha);
			ha->flags |= MENLO_LOGIN_OPERATIONAL;
			ADAPTER_STATE_UNLOCK(ha);
			break;
		case MLA_PANIC_RECOVERY:
		case MLA_LOGIN_DIAGNOSTIC_FW:
		case MLA_LOGIN_GOLDEN_FW:
		case MLA_REJECT_RESPONSE:
		default:
			break;
		}
		break;

	case MBA_LIP_F8:	/* Received a LIP F8. */
	case MBA_LIP_RESET:	/* LIP reset occurred. */
	case MBA_LIP_OCCURRED:	/* Loop Initialization Procedure */
		EL(ha, "%xh LIP received\n", mb[0]);

		/* A LIP implies loop topology. */
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~POINT_TO_POINT;
		ADAPTER_STATE_UNLOCK(ha);

		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
			*set_flags |= LOOP_DOWN;
		}
		ql_port_state(ha, FC_STATE_OFFLINE,
		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);

		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
		}

		ha->adapter_stats->lip_count++;

		/* Update AEN queue. */
		ha->xioctl->TotalLipResets++;
		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
			ql_enqueue_aen(ha, mb[0], NULL);
		}
		break;

	case MBA_LOOP_UP:
		/* Decode link speed from mailbox 1 on 23xx and later. */
		if (CFG_IST(ha, (CFG_CTRL_2300|CFG_CTRL_6322|CFG_CTRL_2425))) {
			mb[1] = RD16_IO_REG(ha, mailbox[1]);
			if (mb[1] == 0) {		/* 1GB */
				ha->state = FC_PORT_STATE_MASK(
				    ha->state) | FC_STATE_1GBIT_SPEED;
				index = 1;
			} else if (mb[1] == 1) {	/* 2GB */
				ha->state = FC_PORT_STATE_MASK(
				    ha->state) | FC_STATE_2GBIT_SPEED;
				index = 2;
			} else if (mb[1] == 3) {	/* 4GB */
				ha->state = FC_PORT_STATE_MASK(
				    ha->state) | FC_STATE_4GBIT_SPEED;
				index = 4;
			} else if (mb[1] == 4) {	/* 8GB */
				ha->state = FC_PORT_STATE_MASK(
				    ha->state) | FC_STATE_8GBIT_SPEED;
				index = 8;
			} else {
				ha->state = FC_PORT_STATE_MASK(
				    ha->state);
				index = 0;
			}
		} else {
			ha->state = FC_PORT_STATE_MASK(ha->state) |
			    FC_STATE_FULL_SPEED;
			index = 1;
		}

		/* Propagate the new speed to all virtual ports. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			vha->state = FC_PORT_STATE_MASK(vha->state) |
			    FC_PORT_SPEED_MASK(ha->state);
		}
		EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);

		/* Update AEN queue. */
		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
			ql_enqueue_aen(ha, mb[0], NULL);
		}
		break;

	case MBA_LOOP_DOWN:
		EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, "
		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]));

		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
			*set_flags |= LOOP_DOWN;
		}
		ql_port_state(ha, FC_STATE_OFFLINE,
		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);

		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
		}

		if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->sfp_stat = RD16_IO_REG(ha, mailbox[2]);
		}

		/* Update AEN queue. */
		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
			ql_enqueue_aen(ha, mb[0], NULL);
		}
		break;

	case MBA_PORT_UPDATE:
		mb[1] = RD16_IO_REG(ha, mailbox[1]);
		mb[2] = RD16_IO_REG(ha, mailbox[2]);
		/* mailbox 3 carries the vp_index only when NPIV is enabled. */
		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
		    RD16_IO_REG(ha, mailbox[3]) : 0);

		/* Locate port state structure. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->vp_index == LSB(mb[3])) {
				break;
			}
		}

		if (vha == NULL) {
			break;
		}

		if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
		    (CFG_IST(ha, CFG_CTRL_2425) && (mb[1] != 0xffff ||
		    mb[2] != 6 || mb[3] != 0))) {
			EL(ha, "%xh Port Database Update, Login/Logout "
			    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
			    mb[0], mb[1], mb[2], mb[3]);
		} else {
			EL(ha, "%xh Port Database Update received, mbx1=%xh,"
			    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
			    mb[3]);
			*set_flags |= LOOP_RESYNC_NEEDED;
			*set_flags &= ~LOOP_DOWN;
			*reset_flags |= LOOP_DOWN;
			*reset_flags &= ~LOOP_RESYNC_NEEDED;
			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
			TASK_DAEMON_LOCK(ha);
			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
			vha->task_daemon_flags &= ~LOOP_DOWN;
			TASK_DAEMON_UNLOCK(ha);
			ADAPTER_STATE_LOCK(ha);
			vha->flags &= ~COMMAND_ABORT_TIMEOUT;
			ADAPTER_STATE_UNLOCK(ha);
		}

		/* Update AEN queue. */
		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
			ql_enqueue_aen(ha, mb[0], NULL);
		}
		break;

	case MBA_RSCN_UPDATE:
		mb[1] = RD16_IO_REG(ha, mailbox[1]);
		mb[2] = RD16_IO_REG(ha, mailbox[2]);
		/* mailbox 3 carries the vp_index only when NPIV is enabled. */
		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
		    RD16_IO_REG(ha, mailbox[3]) : 0);

		/* Locate port state structure. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->vp_index == LSB(mb[3])) {
				break;
			}
		}

		if (vha == NULL) {
			break;
		}

		/* Ignore an RSCN that names this adapter's own D_ID. */
		if (LSB(mb[1]) == vha->d_id.b.domain &&
		    MSB(mb[2]) == vha->d_id.b.area &&
		    LSB(mb[2]) == vha->d_id.b.al_pa) {
			EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
		} else {
			EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
			if (FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_OFFLINE) {
				ql_rcv_rscn_els(vha, &mb[0], done_q);
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
				*set_flags |= RSCN_UPDATE_NEEDED;
			}
		}

		/* Update AEN queue. */
		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
			ql_enqueue_aen(ha, mb[0], NULL);
		}
		break;

	case MBA_LIP_ERROR:	/* Loop initialization errors. */
		EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
		    RD16_IO_REG(ha, mailbox[1]));
		break;

	case MBA_IP_RECEIVE:
	case MBA_IP_BROADCAST:
		mb[1] = RD16_IO_REG(ha, mailbox[1]);
		mb[2] = RD16_IO_REG(ha, mailbox[2]);
		mb[3] = RD16_IO_REG(ha, mailbox[3]);

		EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
		    "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);

		/* Locate device queue. */
		s_id.b.al_pa = LSB(mb[2]);
		s_id.b.area = MSB(mb[2]);
		s_id.b.domain = LSB(mb[1]);
		if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
			EL(ha, "Unknown IP device=%xh\n", s_id.b24);
			break;
		}

		/* Per-buffer size from the IP initialization control block. */
		cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
		    ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
		    ha->ip_init_ctrl_blk.cb.buf_size[1]));

		/* Segment count = ceil(sequence length / buffer size). */
		tq->ub_sequence_length = mb[3];
		tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
		if (mb[3] % cnt) {
			tq->ub_total_seg_cnt++;
		}
		cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);

		/* Buffer handles arrive in mailboxes 10 and up. */
		for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
		    index++) {
			mb[index] = RD16_IO_REG(ha, mailbox[index]);
		}

		tq->ub_seq_id = ++ha->ub_seq_id;
		tq->ub_seq_cnt = 0;
		tq->ub_frame_ro = 0;
		tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
		    (CFG_IST(ha, CFG_CTRL_2425) ? BROADCAST_24XX_HDL :
		    IP_BROADCAST_LOOP_ID) : tq->loop_id);
		ha->rcv_dev_q = tq;

		for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
		    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
			if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
			    QL_SUCCESS) {
				EL(ha, "ql_ub_frame_hdr failed, "
				    "isp_abort_needed\n");
				*set_flags |= ISP_ABORT_NEEDED;
				break;
			}
		}
		break;

	case MBA_IP_LOW_WATER_MARK:
	case MBA_IP_RCV_BUFFER_EMPTY:
		EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
		    mb[0]);
		*set_flags |= NEED_UNSOLICITED_BUFFERS;
		break;

	case MBA_IP_HDR_DATA_SPLIT:
		EL(ha, "%xh IP HDR data split received\n", mb[0]);
		break;

	case MBA_POINT_TO_POINT:
		EL(ha, "%xh Point to Point Mode received\n", mb[0]);
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= POINT_TO_POINT;
		ADAPTER_STATE_UNLOCK(ha);

		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
			*set_flags |= LOOP_DOWN;
		}
		ql_port_state(ha, FC_STATE_OFFLINE,
		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);

		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
		}
		break;

	case MBA_CHG_IN_CONNECTION:
		mb[1] = RD16_IO_REG(ha, mailbox[1]);
		if (mb[1] == 2) {
			/* Changed to loop mode; adjust topology. */
			EL(ha, "%xh Change In Connection received, "
			    "mbx1=%xh\n",  mb[0], mb[1]);
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~POINT_TO_POINT;
			ADAPTER_STATE_UNLOCK(ha);
			if (ha->topology & QL_N_PORT) {
				ha->topology = (uint8_t)(ha->topology &
				    ~QL_N_PORT);
				ha->topology = (uint8_t)(ha->topology |
				    QL_NL_PORT);
			}
		} else {
			EL(ha, "%xh Change In Connection received, "
			    "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
			*set_flags |= ISP_ABORT_NEEDED;
		}
		break;

	case MBA_ZIO_UPDATE:
		EL(ha, "%xh ZIO response received\n", mb[0]);

		ha->isp_rsp_index = RD16_IO_REG(ha, resp_in);
		ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr);
		/* ql_response_pkt() owns the interrupt clear in this case. */
		intr = B_FALSE;
		break;

	case MBA_PORT_BYPASS_CHANGED:
		EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
		    mb[0], RD16_IO_REG(ha, mailbox[1]));
		/*
		 * Event generated when there is a transition on
		 * port bypass of crystal+.
		 * Mailbox 1:	Bit 0 - External.
		 *		Bit 2 - Internal.
		 * When the bit is 0, the port is bypassed.
		 *
		 * For now we will generate a LIP for all cases.
		 */
		*set_flags |= HANDLE_PORT_BYPASS_CHANGE;
		break;

	case MBA_RECEIVE_ERROR:
		EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
		    mb[0], RD16_IO_REG(ha, mailbox[1]),
		    RD16_IO_REG(ha, mailbox[2]));
		break;

	case MBA_LS_RJT_SENT:
		EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
		    RD16_IO_REG(ha, mailbox[1]));
		break;

	case MBA_FW_RESTART_COMP:
		EL(ha, "%xh firmware restart complete received mb1=%xh\n",
		    mb[0], RD16_IO_REG(ha, mailbox[1]));
		break;

	default:
		EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]));
		break;
	}

	/* Clear RISC interrupt */
	if (intr && intr_clr) {
		CFG_IST(ha, CFG_CTRL_2425) ?
		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_fast_fcp_post
 *	Fast path for good SCSI I/O completion.
 *
 * Input:
 *	sp:	SRB pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_fast_fcp_post(ql_srb_t *sp)
{
	ql_adapter_state_t	*ha = sp->ha;
	ql_lun_t		*lq = sp->lun_queue;
	ql_tgt_t		*tq = lq->target_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ASSERT(sp->flags & SRB_FCP_CMD_PKT && ha &&
	    sp->pkt->pkt_reason == CS_COMPLETE);

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/* Decrement outstanding commands on device. */
	if (tq->outcnt != 0) {
		tq->outcnt--;
	}

	if (sp->flags & SRB_FCP_CMD_PKT) {
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
			/*
			 * Clear the flag for this LUN so that
			 * untagged commands can be submitted
			 * for it.
1289 */ 1290 lq->flags &= ~LQF_UNTAGGED_PENDING; 1291 } 1292 1293 if (lq->lun_outcnt != 0) { 1294 lq->lun_outcnt--; 1295 } 1296 } 1297 1298 /* Reset port down retry count on good completion. */ 1299 tq->port_down_retry_count = ha->port_down_retry_count; 1300 tq->qfull_retry_count = ha->qfull_retry_count; 1301 1302 /* Remove command from watchdog queue. */ 1303 if (sp->flags & SRB_WATCHDOG_ENABLED) { 1304 ql_remove_link(&tq->wdg, &sp->wdg); 1305 sp->flags &= ~SRB_WATCHDOG_ENABLED; 1306 } 1307 1308 if (lq->cmd.first != NULL) { 1309 ql_next(ha, lq); 1310 } else { 1311 /* Release LU queue specific lock. */ 1312 DEVICE_QUEUE_UNLOCK(tq); 1313 if (ha->pha->pending_cmds.first != NULL) { 1314 ql_start_iocb(ha, NULL); 1315 } 1316 } 1317 1318 /* Sync buffers if required. */ 1319 if (sp->flags & SRB_MS_PKT) { 1320 (void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0, 1321 DDI_DMA_SYNC_FORCPU); 1322 } 1323 1324 /* Map ISP completion codes. */ 1325 sp->pkt->pkt_expln = FC_EXPLN_NONE; 1326 sp->pkt->pkt_action = FC_ACTION_RETRYABLE; 1327 sp->pkt->pkt_state = FC_PKT_SUCCESS; 1328 1329 /* Now call the pkt completion callback */ 1330 if (sp->flags & SRB_POLL) { 1331 sp->flags &= ~SRB_POLL; 1332 } else if (sp->pkt->pkt_comp) { 1333 INTR_UNLOCK(ha); 1334 (*sp->pkt->pkt_comp)(sp->pkt); 1335 INTR_LOCK(ha); 1336 } 1337 1338 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1339 } 1340 1341 /* 1342 * ql_response_pkt 1343 * Processes response entry. 1344 * 1345 * Input: 1346 * ha: adapter state pointer. 1347 * done_q: head pointer to done queue. 1348 * set_flags: task daemon flags to set. 1349 * reset_flags: task daemon flags to reset. 1350 * intr_clr: early interrupt clear 1351 * 1352 * Context: 1353 * Interrupt or Kernel context, no mailbox commands allowed. 
1354 */ 1355 static void 1356 ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags, 1357 uint32_t *reset_flags, int intr_clr) 1358 { 1359 response_t *pkt; 1360 uint32_t dma_sync_size_1 = 0; 1361 uint32_t dma_sync_size_2 = 0; 1362 int status = 0; 1363 1364 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 1365 1366 /* Clear RISC interrupt */ 1367 if (intr_clr) { 1368 CFG_IST(ha, CFG_CTRL_2425) ? 1369 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) : 1370 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT); 1371 } 1372 1373 if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) { 1374 EL(ha, "index error = %xh, isp_abort_needed", 1375 ha->isp_rsp_index); 1376 *set_flags |= ISP_ABORT_NEEDED; 1377 return; 1378 } 1379 1380 if ((ha->flags & ONLINE) == 0) { 1381 QL_PRINT_3(CE_CONT, "(%d): not onlne, done\n", ha->instance); 1382 return; 1383 } 1384 1385 /* Calculate size of response queue entries to sync. */ 1386 if (ha->isp_rsp_index > ha->rsp_ring_index) { 1387 dma_sync_size_1 = (uint32_t) 1388 ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) * 1389 RESPONSE_ENTRY_SIZE); 1390 } else if (ha->isp_rsp_index == 0) { 1391 dma_sync_size_1 = (uint32_t) 1392 ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) * 1393 RESPONSE_ENTRY_SIZE); 1394 } else { 1395 /* Responses wrap around the Q */ 1396 dma_sync_size_1 = (uint32_t) 1397 ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) * 1398 RESPONSE_ENTRY_SIZE); 1399 dma_sync_size_2 = (uint32_t) 1400 (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE); 1401 } 1402 1403 /* Sync DMA buffer. 
*/ 1404 (void) ddi_dma_sync(ha->hba_buf.dma_handle, 1405 (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE + 1406 RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1, 1407 DDI_DMA_SYNC_FORKERNEL); 1408 if (dma_sync_size_2) { 1409 (void) ddi_dma_sync(ha->hba_buf.dma_handle, 1410 RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2, 1411 DDI_DMA_SYNC_FORKERNEL); 1412 } 1413 1414 while (ha->rsp_ring_index != ha->isp_rsp_index) { 1415 pkt = ha->response_ring_ptr; 1416 1417 QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n", 1418 ha->instance, ha->rsp_ring_index, ha->isp_rsp_index); 1419 QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8, 1420 RESPONSE_ENTRY_SIZE); 1421 1422 /* Adjust ring index. */ 1423 ha->rsp_ring_index++; 1424 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) { 1425 ha->rsp_ring_index = 0; 1426 ha->response_ring_ptr = ha->response_ring_bp; 1427 } else { 1428 ha->response_ring_ptr++; 1429 } 1430 1431 /* Process packet. */ 1432 if (ha->status_srb != NULL && pkt->entry_type != 1433 STATUS_CONT_TYPE) { 1434 ql_add_link_b(done_q, &ha->status_srb->cmd); 1435 ha->status_srb = NULL; 1436 } 1437 1438 pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_2425) ? 1439 pkt->entry_status & 0x3c : pkt->entry_status & 0x7e); 1440 1441 if (pkt->entry_status != 0) { 1442 ql_error_entry(ha, pkt, done_q, set_flags, 1443 reset_flags); 1444 } else { 1445 switch (pkt->entry_type) { 1446 case STATUS_TYPE: 1447 status |= CFG_IST(ha, CFG_CTRL_2425) ? 
1448 ql_24xx_status_entry(ha, 1449 (sts_24xx_entry_t *)pkt, done_q, set_flags, 1450 reset_flags) : 1451 ql_status_entry(ha, (sts_entry_t *)pkt, 1452 done_q, set_flags, reset_flags); 1453 break; 1454 1455 case STATUS_CONT_TYPE: 1456 ql_status_cont_entry(ha, 1457 (sts_cont_entry_t *)pkt, done_q, set_flags, 1458 reset_flags); 1459 break; 1460 1461 case IMMEDIATE_NOTIFY_TYPE: 1462 if (CFG_IST(ha, CFG_ENABLE_TARGET_MODE)) { 1463 ql_immediate_notify_entry(ha, 1464 (immediate_notify_entry_t *)pkt, 1465 done_q, set_flags, reset_flags); 1466 } 1467 break; 1468 1469 case NOTIFY_ACKNOWLEDGE_TYPE: 1470 if (CFG_IST(ha, CFG_ENABLE_TARGET_MODE)) { 1471 ql_notify_acknowledge_entry(ha, 1472 (notify_acknowledge_entry_t *)pkt, 1473 done_q, set_flags, reset_flags); 1474 } 1475 break; 1476 1477 case ATIO_TYPE: 1478 if (CFG_IST(ha, CFG_ENABLE_TARGET_MODE)) { 1479 ql_accept_target_io_entry(ha, 1480 (atio_entry_t *)pkt, done_q, 1481 set_flags, reset_flags); 1482 } 1483 break; 1484 1485 case CTIO_TYPE_2: 1486 case CTIO_TYPE_3: 1487 if (CFG_IST(ha, CFG_ENABLE_TARGET_MODE)) { 1488 ql_continue_target_io_entry(ha, 1489 (ctio_entry_t *)pkt, done_q, 1490 set_flags, reset_flags); 1491 } 1492 break; 1493 1494 case IP_TYPE: 1495 case IP_A64_TYPE: 1496 case IP_CMD_TYPE: 1497 ql_ip_entry(ha, (ip_entry_t *)pkt, done_q, 1498 set_flags, reset_flags); 1499 break; 1500 case IP_RECEIVE_TYPE: 1501 ql_ip_rcv_entry(ha, 1502 (ip_rcv_entry_t *)pkt, done_q, set_flags, 1503 reset_flags); 1504 break; 1505 case IP_RECEIVE_CONT_TYPE: 1506 ql_ip_rcv_cont_entry(ha, 1507 (ip_rcv_cont_entry_t *)pkt, done_q, 1508 set_flags, reset_flags); 1509 break; 1510 case IP_24XX_RECEIVE_TYPE: 1511 ql_ip_24xx_rcv_entry(ha, 1512 (ip_rcv_24xx_entry_t *)pkt, done_q, 1513 set_flags, reset_flags); 1514 break; 1515 case MS_TYPE: 1516 ql_ms_entry(ha, (ms_entry_t *)pkt, done_q, 1517 set_flags, reset_flags); 1518 break; 1519 case REPORT_ID_TYPE: 1520 ql_report_id_entry(ha, (report_id_1_t *)pkt, 1521 done_q, set_flags, reset_flags); 1522 
break; 1523 case IP_BUF_POOL_TYPE: 1524 case MARKER_TYPE: 1525 case VP_MODIFY_TYPE: 1526 case VP_CONTROL_TYPE: 1527 break; 1528 default: 1529 EL(ha, "Unknown IOCB entry type=%xh\n", 1530 pkt->entry_type); 1531 break; 1532 } 1533 } 1534 } 1535 1536 /* Inform RISC of processed responses. */ 1537 WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index); 1538 1539 /* RESET packet received delay for possible async event. */ 1540 if (status & BIT_0) { 1541 drv_usecwait(500000); 1542 } 1543 1544 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1545 } 1546 1547 /* 1548 * ql_error_entry 1549 * Processes error entry. 1550 * 1551 * Input: 1552 * ha = adapter state pointer. 1553 * pkt = entry pointer. 1554 * done_q = head pointer to done queue. 1555 * set_flags = task daemon flags to set. 1556 * reset_flags = task daemon flags to reset. 1557 * 1558 * Context: 1559 * Interrupt or Kernel context, no mailbox commands allowed. 1560 */ 1561 /* ARGSUSED */ 1562 static void 1563 ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q, 1564 uint32_t *set_flags, uint32_t *reset_flags) 1565 { 1566 ql_srb_t *sp; 1567 uint32_t index, cnt; 1568 1569 if (pkt->entry_type == INVALID_ENTRY_TYPE) { 1570 EL(ha, "Aborted command\n"); 1571 return; 1572 } 1573 1574 QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance); 1575 QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE); 1576 1577 if (pkt->entry_status & BIT_6) { 1578 EL(ha, "Request Queue DMA error\n"); 1579 } else if (pkt->entry_status & BIT_5) { 1580 EL(ha, "Invalid Entry Order\n"); 1581 } else if (pkt->entry_status & BIT_4) { 1582 EL(ha, "Invalid Entry Count\n"); 1583 } else if (pkt->entry_status & BIT_3) { 1584 EL(ha, "Invalid Entry Parameter\n"); 1585 } else if (pkt->entry_status & BIT_2) { 1586 EL(ha, "Invalid Entry Type\n"); 1587 } else if (pkt->entry_status & BIT_1) { 1588 EL(ha, "Busy\n"); 1589 } else { 1590 EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status); 1591 } 1592 1593 /* Get handle. 
*/ 1594 cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle); 1595 index = cnt & OSC_INDEX_MASK; 1596 1597 /* Validate handle. */ 1598 sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] : 1599 NULL; 1600 1601 if (sp != NULL && sp->handle == cnt) { 1602 ha->outstanding_cmds[index] = NULL; 1603 sp->handle = 0; 1604 sp->flags &= ~SRB_IN_TOKEN_ARRAY; 1605 1606 /* Bad payload or header */ 1607 if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) { 1608 /* Bad payload or header, set error status. */ 1609 sp->pkt->pkt_reason = CS_BAD_PAYLOAD; 1610 } else if (pkt->entry_status & BIT_1) /* FULL flag */ { 1611 sp->pkt->pkt_reason = CS_QUEUE_FULL; 1612 } else { 1613 /* Set error status. */ 1614 sp->pkt->pkt_reason = CS_UNKNOWN; 1615 } 1616 1617 /* Set completed status. */ 1618 sp->flags |= SRB_ISP_COMPLETED; 1619 1620 /* Place command on done queue. */ 1621 ql_add_link_b(done_q, &sp->cmd); 1622 1623 } else { 1624 if (sp == NULL) { 1625 EL(ha, "unknown IOCB handle=%xh\n", cnt); 1626 } else { 1627 EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n", 1628 cnt, sp->handle); 1629 } 1630 1631 (void) ql_binary_fw_dump(ha, FALSE); 1632 1633 if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED | 1634 ABORT_ISP_ACTIVE))) { 1635 EL(ha, "ISP Invalid handle, isp_abort_needed\n"); 1636 *set_flags |= ISP_ABORT_NEEDED; 1637 } 1638 } 1639 1640 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1641 } 1642 1643 /* 1644 * ql_status_entry 1645 * Processes received ISP2200-2300 status entry. 1646 * 1647 * Input: 1648 * ha: adapter state pointer. 1649 * pkt: entry pointer. 1650 * done_q: done queue pointer. 1651 * set_flags: task daemon flags to set. 1652 * reset_flags: task daemon flags to reset. 1653 * 1654 * Returns: 1655 * BIT_0 = CS_RESET status received. 1656 * 1657 * Context: 1658 * Interrupt or Kernel context, no mailbox commands allowed. 
1659 */ 1660 /* ARGSUSED */ 1661 static int 1662 ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt, 1663 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 1664 { 1665 ql_srb_t *sp; 1666 uint32_t index, cnt; 1667 uint16_t comp_status; 1668 int rval = 0; 1669 1670 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 1671 1672 /* Get handle. */ 1673 cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle); 1674 index = cnt & OSC_INDEX_MASK; 1675 1676 /* Validate handle. */ 1677 sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] : 1678 NULL; 1679 1680 if (sp != NULL && sp->handle == cnt) { 1681 comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle, 1682 &pkt->comp_status); 1683 1684 /* 1685 * We dont care about SCSI QFULLs. 1686 */ 1687 if (comp_status == CS_QUEUE_FULL) { 1688 EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n", 1689 sp->lun_queue->target_queue->d_id.b24, 1690 sp->lun_queue->lun_no); 1691 comp_status = CS_COMPLETE; 1692 } 1693 1694 /* 1695 * 2300 firmware marks completion status as data underrun 1696 * for scsi qfulls. Make it transport complete. 1697 */ 1698 if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) && 1699 (comp_status == CS_DATA_UNDERRUN) && 1700 (pkt->scsi_status_l != 0)) { 1701 comp_status = CS_COMPLETE; 1702 } 1703 1704 /* 1705 * Workaround T3 issue where we do not get any data xferred 1706 * but get back a good status. 1707 */ 1708 if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 && 1709 comp_status == CS_COMPLETE && 1710 pkt->scsi_status_l == 0 && 1711 (pkt->scsi_status_h & FCP_RSP_MASK) == 0 && 1712 pkt->residual_length == 0 && 1713 sp->fcp && 1714 sp->fcp->fcp_data_len != 0 && 1715 (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) == 1716 SF_DATA_OUT) { 1717 comp_status = CS_ABORTED; 1718 } 1719 1720 if (sp->flags & SRB_MS_PKT) { 1721 /* 1722 * Ideally it should never be true. 
But there 1723 * is a bug in FW which upon receiving invalid 1724 * parameters in MS IOCB returns it as 1725 * status entry and not as ms entry type. 1726 */ 1727 ql_ms_entry(ha, (ms_entry_t *)pkt, done_q, 1728 set_flags, reset_flags); 1729 QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n", 1730 ha->instance); 1731 return (0); 1732 } 1733 1734 ha->outstanding_cmds[index] = NULL; 1735 sp->handle = 0; 1736 sp->flags &= ~SRB_IN_TOKEN_ARRAY; 1737 1738 /* 1739 * Fast path to good SCSI I/O completion 1740 */ 1741 if ((comp_status == CS_COMPLETE) & 1742 (!pkt->scsi_status_l) & 1743 (!(pkt->scsi_status_h & FCP_RSP_MASK))) { 1744 /* Set completed status. */ 1745 sp->flags |= SRB_ISP_COMPLETED; 1746 sp->pkt->pkt_reason = comp_status; 1747 ql_fast_fcp_post(sp); 1748 QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n", 1749 ha->instance); 1750 return (0); 1751 } 1752 rval = ql_status_error(ha, sp, pkt, done_q, set_flags, 1753 reset_flags); 1754 } else { 1755 if (sp == NULL) { 1756 EL(ha, "unknown IOCB handle=%xh\n", cnt); 1757 } else { 1758 EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n", 1759 cnt, sp->handle); 1760 } 1761 1762 (void) ql_binary_fw_dump(ha, FALSE); 1763 1764 if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED | 1765 ABORT_ISP_ACTIVE))) { 1766 EL(ha, "ISP Invalid handle, isp_abort_needed\n"); 1767 *set_flags |= ISP_ABORT_NEEDED; 1768 } 1769 } 1770 1771 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1772 1773 return (rval); 1774 } 1775 1776 /* 1777 * ql_24xx_status_entry 1778 * Processes received ISP24xx status entry. 1779 * 1780 * Input: 1781 * ha: adapter state pointer. 1782 * pkt: entry pointer. 1783 * done_q: done queue pointer. 1784 * set_flags: task daemon flags to set. 1785 * reset_flags: task daemon flags to reset. 1786 * 1787 * Returns: 1788 * BIT_0 = CS_RESET status received. 1789 * 1790 * Context: 1791 * Interrupt or Kernel context, no mailbox commands allowed. 
1792 */ 1793 /* ARGSUSED */ 1794 static int 1795 ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt, 1796 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 1797 { 1798 ql_srb_t *sp; 1799 uint32_t index, cnt; 1800 uint16_t comp_status; 1801 int rval = 0; 1802 1803 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 1804 1805 /* Get handle. */ 1806 cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle); 1807 index = cnt & OSC_INDEX_MASK; 1808 1809 /* Validate handle. */ 1810 sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] : 1811 NULL; 1812 1813 if (sp != NULL && sp->handle == cnt) { 1814 comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle, 1815 &pkt->comp_status); 1816 1817 /* 1818 * We dont care about SCSI QFULLs. 1819 */ 1820 if (comp_status == CS_QUEUE_FULL) { 1821 EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n", 1822 sp->lun_queue->target_queue->d_id.b24, 1823 sp->lun_queue->lun_no); 1824 comp_status = CS_COMPLETE; 1825 } 1826 1827 /* 1828 * 2300 firmware marks completion status as data underrun 1829 * for scsi qfulls. Make it transport complete. 1830 */ 1831 if ((comp_status == CS_DATA_UNDERRUN) && 1832 (pkt->scsi_status_l != 0)) { 1833 comp_status = CS_COMPLETE; 1834 } 1835 1836 /* 1837 * Workaround T3 issue where we do not get any data xferred 1838 * but get back a good status. 1839 */ 1840 if (comp_status == CS_COMPLETE && 1841 pkt->scsi_status_l == 0 && 1842 (pkt->scsi_status_h & FCP_RSP_MASK) == 0 && 1843 pkt->residual_length != 0 && 1844 sp->fcp && 1845 sp->fcp->fcp_data_len != 0 && 1846 sp->fcp->fcp_cntl.cntl_write_data) { 1847 comp_status = CS_ABORTED; 1848 } 1849 1850 if (sp->flags & SRB_MS_PKT) { 1851 /* 1852 * Ideally it should never be true. But there 1853 * is a bug in FW which upon receiving invalid 1854 * parameters in MS IOCB returns it as 1855 * status entry and not as ms entry type. 
1856 */ 1857 ql_ms_entry(ha, (ms_entry_t *)pkt, done_q, 1858 set_flags, reset_flags); 1859 QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n", 1860 ha->instance); 1861 return (0); 1862 } 1863 1864 ha->outstanding_cmds[index] = NULL; 1865 sp->handle = 0; 1866 sp->flags &= ~SRB_IN_TOKEN_ARRAY; 1867 1868 /* 1869 * Fast path to good SCSI I/O completion 1870 */ 1871 if ((comp_status == CS_COMPLETE) & 1872 (!pkt->scsi_status_l) & 1873 (!(pkt->scsi_status_h & FCP_RSP_MASK))) { 1874 /* Set completed status. */ 1875 sp->flags |= SRB_ISP_COMPLETED; 1876 sp->pkt->pkt_reason = comp_status; 1877 ql_fast_fcp_post(sp); 1878 QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n", 1879 ha->instance); 1880 return (0); 1881 } 1882 rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q, 1883 set_flags, reset_flags); 1884 } else { 1885 if (sp == NULL) { 1886 EL(ha, "unknown IOCB handle=%xh\n", cnt); 1887 } else { 1888 EL(sp->ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n", 1889 cnt, sp->handle); 1890 } 1891 1892 (void) ql_binary_fw_dump(ha, FALSE); 1893 1894 if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED | 1895 ABORT_ISP_ACTIVE))) { 1896 EL(ha, "ISP Invalid handle, isp_abort_needed\n"); 1897 *set_flags |= ISP_ABORT_NEEDED; 1898 } 1899 } 1900 1901 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1902 1903 return (rval); 1904 } 1905 1906 /* 1907 * ql_status_error 1908 * Processes received ISP status entry error. 1909 * 1910 * Input: 1911 * ha: adapter state pointer. 1912 * sp: SRB pointer. 1913 * pkt: entry pointer. 1914 * done_q: done queue pointer. 1915 * set_flags: task daemon flags to set. 1916 * reset_flags: task daemon flags to reset. 1917 * 1918 * Returns: 1919 * BIT_0 = CS_RESET status received. 1920 * 1921 * Context: 1922 * Interrupt or Kernel context, no mailbox commands allowed. 
 */
/* ARGSUSED */
static int
ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	uint32_t		sense_sz = 0;
	uint32_t		cnt;
	ql_tgt_t		*tq;
	fcp_rsp_t		*fcpr;
	struct fcp_rsp_info	*rsp;
	int			rval = 0;

	/*
	 * Normalized view of the status IOCB.  The 23xx and 24xx status
	 * entry layouts differ, so the per-family code below fills this
	 * struct and the common code works only from it.
	 */
	struct {
		uint8_t		*rsp_info;
		uint8_t		*req_sense_data;
		uint32_t	residual_length;
		uint32_t	fcp_residual_length;
		uint32_t	rsp_info_length;
		uint32_t	req_sense_length;
		uint16_t	comp_status;
		uint8_t		state_flags_l;
		uint8_t		state_flags_h;
		uint8_t		scsi_status_l;
		uint8_t		scsi_status_h;
	} sts;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_2425)) {
		/* ISP24xx: reinterpret the entry with the 24xx layout. */
		sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;

		/* Setup status. */
		sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt24->comp_status);
		sts.scsi_status_l = pkt24->scsi_status_l;
		sts.scsi_status_h = pkt24->scsi_status_h;

		/* Setup firmware residuals. */
		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
		    ddi_get32(ha->hba_buf.acc_handle,
		    (uint32_t *)&pkt24->residual_length) : 0;

		/* Setup FCP residuals. */
		sts.fcp_residual_length = sts.scsi_status_h &
		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
		    ddi_get32(ha->hba_buf.acc_handle,
		    (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;

		/*
		 * When the firmware and FCP response residuals disagree on
		 * an underrun, distrust the FCP underrun indication.
		 */
		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
		    (sts.scsi_status_h & FCP_RESID_UNDER) &&
		    (sts.residual_length != pkt24->fcp_rsp_residual_count)) {

			EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
			    sts.residual_length,
			    pkt24->fcp_rsp_residual_count);
			sts.scsi_status_h = (uint8_t)
			    (sts.scsi_status_h & ~FCP_RESID_UNDER);
		}

		/*
		 * Setup state flags.  The 24xx entry does not carry the
		 * 23xx-style state flags, so synthesize them from the FCP
		 * command and the completion outcome.
		 */
		sts.state_flags_l = pkt24->state_flags_l;
		sts.state_flags_h = pkt24->state_flags_h;

		if (sp->fcp->fcp_data_len &&
		    (sts.comp_status != CS_DATA_UNDERRUN ||
		    sts.residual_length != sp->fcp->fcp_data_len)) {
			/* Some data was moved; mark it transferred. */
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_GOT_BUS |
			    SF_GOT_TARGET | SF_SENT_CMD |
			    SF_XFERRED_DATA | SF_GOT_STATUS);
		} else {
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_GOT_BUS |
			    SF_GOT_TARGET | SF_SENT_CMD |
			    SF_GOT_STATUS);
		}
		if (sp->fcp->fcp_cntl.cntl_write_data) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_DATA_OUT);
		} else if (sp->fcp->fcp_cntl.cntl_read_data) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_DATA_IN);
		}
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_HEAD_OF_Q);
		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_ORDERED_Q);
		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_SIMPLE_Q);
		}

		/*
		 * Setup FCP response info.  On 24xx, response info and
		 * sense data share rsp_sense_data[]; response info comes
		 * first.  Length is clamped to struct fcp_rsp_info.
		 */
		sts.rsp_info = &pkt24->rsp_sense_data[0];
		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
			sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle,
			    (uint32_t *)&pkt24->fcp_rsp_data_length);
			if (sts.rsp_info_length >
			    sizeof (struct fcp_rsp_info)) {
				sts.rsp_info_length =
				    sizeof (struct fcp_rsp_info);
			}
			/* 24xx data is byte-swapped in 4-byte words. */
			for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
				ql_chg_endian(sts.rsp_info + cnt, 4);
			}
		} else {
			sts.rsp_info_length = 0;
		}

		/* Setup sense data. */
		sts.req_sense_data =
		    &pkt24->rsp_sense_data[sts.rsp_info_length];
		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
			sts.req_sense_length =
			    ddi_get32(ha->hba_buf.acc_handle,
			    (uint32_t *)&pkt24->fcp_sense_length);
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_ARQ_DONE);
			/* Only the portion within this entry is here. */
			sense_sz = (uint32_t)
			    (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
			    (uintptr_t)sts.req_sense_data);
			for (cnt = 0; cnt < sense_sz; cnt += 4) {
				ql_chg_endian(sts.req_sense_data + cnt, 4);
			}
		} else {
			sts.req_sense_length = 0;
		}
	} else {
		/* ISP2200-2300 layout. */
		/* Setup status. */
		sts.comp_status = (uint16_t)ddi_get16(
		    ha->hba_buf.acc_handle, &pkt23->comp_status);
		sts.scsi_status_l = pkt23->scsi_status_l;
		sts.scsi_status_h = pkt23->scsi_status_h;

		/* Setup firmware residuals. */
		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
		    ddi_get32(ha->hba_buf.acc_handle,
		    (uint32_t *)&pkt23->residual_length) : 0;

		/* Setup FCP residuals. */
		sts.fcp_residual_length = sts.scsi_status_h &
		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
		    sts.residual_length : 0;

		/* Setup state flags. */
		sts.state_flags_l = pkt23->state_flags_l;
		sts.state_flags_h = pkt23->state_flags_h;

		/* Setup FCP response info. */
		sts.rsp_info = &pkt23->rsp_info[0];
		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
			sts.rsp_info_length = ddi_get16(
			    ha->hba_buf.acc_handle,
			    (uint16_t *)&pkt23->rsp_info_length);
			if (sts.rsp_info_length >
			    sizeof (struct fcp_rsp_info)) {
				sts.rsp_info_length =
				    sizeof (struct fcp_rsp_info);
			}
		} else {
			sts.rsp_info_length = 0;
		}

		/* Setup sense data. */
		sts.req_sense_data = &pkt23->req_sense_data[0];
		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
		    ddi_get16(ha->hba_buf.acc_handle,
		    (uint16_t *)&pkt23->req_sense_length) : 0;
	}

	/* Build the transport's FCP response in the pkt response buffer. */
	bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);

	fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
	rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
	    sizeof (fcp_rsp_t));

	tq = sp->lun_queue->target_queue;

	fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
	if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
		fcpr->fcp_u.fcp_status.rsp_len_set = 1;
	}
	if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
		fcpr->fcp_u.fcp_status.sense_len_set = 1;
	}
	if (sts.scsi_status_h & FCP_RESID_OVER) {
		fcpr->fcp_u.fcp_status.resid_over = 1;
	}
	if (sts.scsi_status_h & FCP_RESID_UNDER) {
		fcpr->fcp_u.fcp_status.resid_under = 1;
	}
	fcpr->fcp_u.fcp_status.reserved_1 = 0;

	/* Set ISP completion status */
	sp->pkt->pkt_reason = sts.comp_status;

	/* Update statistics. */
	if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
	    (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {

		sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
		if (sense_sz > sts.rsp_info_length) {
			sense_sz = sts.rsp_info_length;
		}

		/* copy response information data. */
		if (sense_sz) {
			ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp,
			    sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR);
		}
		fcpr->fcp_response_len = sense_sz;

		rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
		    fcpr->fcp_response_len);

		/* Byte 3 of the response info is the rsp_code. */
		switch (*(sts.rsp_info + 3)) {
		case FCP_NO_FAILURE:
			break;
		case FCP_DL_LEN_MISMATCH:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].dl_len_mismatches++;
			break;
		case FCP_CMND_INVALID:
			break;
		case FCP_DATA_RO_MISMATCH:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].data_ro_mismatches++;
			break;
		case FCP_TASK_MGMT_NOT_SUPPTD:
			break;
		case FCP_TASK_MGMT_FAILED:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].task_mgmt_failures++;
			break;
		default:
			break;
		}
	} else {
		/*
		 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
		 * sts.scsi_status_h, sp->pkt->pkt_rsplen);
		 */
		fcpr->fcp_response_len = 0;
	}

	/* Set reset status received. */
	if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
		rval |= BIT_0;
	}

	/*
	 * Retry decision tree.  Tape devices are never retried here
	 * (presumably to preserve command ordering — NOTE(review):
	 * confirm rationale against the rest of the driver).
	 */
	if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
	    ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
	    ha->task_daemon_flags & LOOP_DOWN) {
		EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
		    tq->d_id.b24, sp->lun_queue->lun_no);

		/* Set retry status. */
		sp->flags |= SRB_RETRY;
	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    tq->port_down_retry_count != 0 &&
	    (sts.comp_status == CS_INCOMPLETE ||
	    sts.comp_status == CS_PORT_UNAVAILABLE ||
	    sts.comp_status == CS_PORT_LOGGED_OUT ||
	    sts.comp_status == CS_PORT_CONFIG_CHG ||
	    sts.comp_status == CS_PORT_BUSY)) {
		EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
		    "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
		    tq->port_down_retry_count);

		/* Set retry status. */
		sp->flags |= SRB_RETRY;

		/* Suspend the queue once per port-down episode. */
		if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			tq->flags |= TQF_QUEUE_SUSPENDED;

			/* Decrement port down count. */
			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
				tq->port_down_retry_count--;
			}

			DEVICE_QUEUE_UNLOCK(tq);

			if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
			    == 0 &&
			    (sts.comp_status == CS_PORT_LOGGED_OUT ||
			    sts.comp_status == CS_PORT_UNAVAILABLE)) {
				sp->ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(sp->ha, tq, done_q);
			}

			/* Arm the port retry timer if not yet running. */
			ADAPTER_STATE_LOCK(ha);
			if (ha->port_retry_timer == 0) {
				if ((ha->port_retry_timer =
				    ha->port_down_retry_delay) == 0) {
					*set_flags |=
					    PORT_RETRY_NEEDED;
				}
			}
			ADAPTER_STATE_UNLOCK(ha);
		}
	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    (sts.comp_status == CS_RESET ||
	    (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
	    (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
		if (sts.comp_status == CS_RESET) {
			EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
			    tq->d_id.b24, sp->lun_queue->lun_no);
		} else if (sts.comp_status == CS_QUEUE_FULL) {
			EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
			    "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
			    tq->qfull_retry_count);
			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->qfull_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					if ((ha->port_retry_timer =
					    ha->qfull_retry_delay) ==
					    0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}
		} else {
			EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
			    tq->d_id.b24, sp->lun_queue->lun_no);
		}

		/* Set retry status. */
		sp->flags |= SRB_RETRY;
	} else {
		/* Not retried: finalize residuals and sense data. */
		fcpr->fcp_resid =
		    sts.fcp_residual_length > sp->fcp->fcp_data_len ?
		    sp->fcp->fcp_data_len : sts.fcp_residual_length;

		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
		    (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {

			if (sts.scsi_status_l == STATUS_CHECK) {
				sp->pkt->pkt_reason = CS_COMPLETE;
			} else {
				EL(ha, "transport error - "
				    "underrun & invalid resid\n");
				EL(ha, "ssh=%xh, ssl=%xh\n",
				    sts.scsi_status_h, sts.scsi_status_l);
				sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
			}
		}

		/* Ignore firmware underrun error. */
		if (sts.comp_status == CS_DATA_UNDERRUN &&
		    (sts.scsi_status_h & FCP_RESID_UNDER ||
		    (sts.scsi_status_l != STATUS_CHECK &&
		    sts.scsi_status_l != STATUS_GOOD))) {
			sp->pkt->pkt_reason = CS_COMPLETE;
		}

		if (sp->pkt->pkt_reason != CS_COMPLETE) {
			ha->xioctl->DeviceErrorCount++;
			EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh"
			    "\n", sts.comp_status, tq->d_id.b24,
			    sp->lun_queue->lun_no);
		}

		/* Set target request sense data. */
		if (sts.scsi_status_l == STATUS_CHECK) {
			if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {

				/* Sense key at byte 2 of fixed-format sense. */
				if (sp->pkt->pkt_reason == CS_COMPLETE &&
				    sts.req_sense_data[2] != KEY_NO_SENSE &&
				    sts.req_sense_data[2] !=
				    KEY_UNIT_ATTENTION) {
					ha->xioctl->DeviceErrorCount++;
				}

				sense_sz = sts.req_sense_length;

				/* Insure data does not exceed buf. */
				if (sp->pkt->pkt_rsplen <=
				    (uint32_t)sizeof (fcp_rsp_t) +
				    fcpr->fcp_response_len) {
					sp->request_sense_length = 0;
				} else {
					sp->request_sense_length = (uint32_t)
					    (sp->pkt->pkt_rsplen -
					    sizeof (fcp_rsp_t) -
					    fcpr->fcp_response_len);
				}

				if (sense_sz <
				    sp->request_sense_length) {
					sp->request_sense_length =
					    sense_sz;
				}

				sp->request_sense_ptr = (caddr_t)rsp;

				/*
				 * Clamp to the sense bytes physically
				 * contained in this status entry.
				 */
				sense_sz = (uint32_t)
				    (((uintptr_t)pkt23 +
				    sizeof (sts_entry_t)) -
				    (uintptr_t)sts.req_sense_data);
				if (sp->request_sense_length <
				    sense_sz) {
					sense_sz =
					    sp->request_sense_length;
				}

				fcpr->fcp_sense_len = sense_sz;

				/* Move sense data. */
				ddi_rep_get8(ha->hba_buf.acc_handle,
				    (uint8_t *)sp->request_sense_ptr,
				    sts.req_sense_data,
				    (size_t)sense_sz,
				    DDI_DEV_AUTOINCR);

				/*
				 * Remaining sense bytes arrive in status
				 * continuation entries; park the SRB on
				 * ha->status_srb until they are consumed.
				 */
				sp->request_sense_ptr += sense_sz;
				sp->request_sense_length -= sense_sz;
				if (sp->request_sense_length != 0) {
					ha->status_srb = sp;
				}
			}

			if (sense_sz != 0) {
				EL(sp->ha, "check condition sense data, "
				    "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
				    sp->lun_queue->lun_no,
				    sts.req_sense_data[0],
				    sts.req_sense_data[1],
				    sts.req_sense_data[2],
				    sts.req_sense_data[3],
				    sts.req_sense_data[4],
				    sts.req_sense_data[5],
				    sts.req_sense_data[6],
				    sts.req_sense_data[7],
				    sts.req_sense_data[8],
				    sts.req_sense_data[9],
				    sts.req_sense_data[10],
				    sts.req_sense_data[11],
				    sts.req_sense_data[12],
				    sts.req_sense_data[13],
				    sts.req_sense_data[14],
				    sts.req_sense_data[15],
				    sts.req_sense_data[16],
				    sts.req_sense_data[17]);
			} else {
				EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
				    "\n", tq->d_id.b24, sp->lun_queue->lun_no);
			}
		}
	}

	/* Set completed status. */
	sp->flags |= SRB_ISP_COMPLETED;

	/* Place command on done queue. */
	if (ha->status_srb == NULL) {
		ql_add_link_b(done_q, &sp->cmd);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_status_cont_entry
 *	Processes status continuation entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
2415 */ 2416 /* ARGSUSED */ 2417 static void 2418 ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt, 2419 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 2420 { 2421 uint32_t sense_sz, index; 2422 ql_srb_t *sp = ha->status_srb; 2423 2424 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2425 2426 if (sp != NULL && sp->request_sense_length) { 2427 if (sp->request_sense_length > sizeof (pkt->req_sense_data)) { 2428 sense_sz = sizeof (pkt->req_sense_data); 2429 } else { 2430 sense_sz = sp->request_sense_length; 2431 } 2432 2433 if (CFG_IST(ha, CFG_CTRL_2425)) { 2434 for (index = 0; index < sense_sz; index += 4) { 2435 ql_chg_endian((uint8_t *) 2436 &pkt->req_sense_data[0] + index, 4); 2437 } 2438 } 2439 2440 /* Move sense data. */ 2441 ddi_rep_get8(ha->hba_buf.acc_handle, 2442 (uint8_t *)sp->request_sense_ptr, 2443 (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz, 2444 DDI_DEV_AUTOINCR); 2445 2446 sp->request_sense_ptr += sense_sz; 2447 sp->request_sense_length -= sense_sz; 2448 2449 /* Place command on done queue. */ 2450 if (sp->request_sense_length == 0) { 2451 ql_add_link_b(done_q, &sp->cmd); 2452 ha->status_srb = NULL; 2453 } 2454 } 2455 2456 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2457 } 2458 2459 /* 2460 * ql_immediate_notify_entry 2461 * Processes immediate notify entry. 2462 * 2463 * Input: 2464 * ha: adapter state pointer. 2465 * pkt: entry pointer. 2466 * done_q: done queue pointer. 2467 * set_flags: task daemon flags to set. 2468 * reset_flags: task daemon flags to reset. 2469 * 2470 * Context: 2471 * Interrupt or Kernel context, no mailbox commands allowed. 
 */
/* ARGSUSED */
static void
ql_immediate_notify_entry(ql_adapter_state_t *ha,
    immediate_notify_entry_t *pkt, ql_head_t *done_q, uint32_t *set_flags,
    uint32_t *reset_flags)
{
	notify_acknowledge_entry_t	*nack;
	ql_srb_t			*sp;
	fcp_cmd_t			*fcp;
	tgt_cmd_t			*cmd, *nackcmd;
	ql_tgt_t			*tq;
	fc_unsol_buf_t			*ubp = NULL;
	int				use_ubuffer;
	uint16_t			loop_id;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Interrupt context: allocation must not sleep. */
	cmd = (tgt_cmd_t *)kmem_zalloc(sizeof (tgt_cmd_t), KM_NOSLEEP);
	if (cmd != NULL) {
		cmd->cmd.base_address = cmd;

		/* Save command context from the IOCB. */
		cmd->type = pkt->entry_type;
		cmd->initiator_id_l = pkt->initiator_id_l;
		cmd->initiator_id_h = pkt->initiator_id_h;
		cmd->rx_id = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt->sequence_id);
		cmd->status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt->status);
		cmd->task_flags_l = pkt->task_flags_l;
		cmd->task_flags_h = pkt->task_flags_h;

		/*
		 * TODO: flushing in case of pkt_status_l of 0x34
		 * needs to be handled properly.
		 */

		EL(ha, "status = %xh\n", cmd->status);

		/*
		 * For immediate notify situations that need attention,
		 * we attempt to put the command in the array of notify
		 * acknowledge slots for future handling. A LIP RESET
		 * always gets slot 0, since we have to ensure that there's
		 * always a slot available and we can't do any other
		 * processing if a LIP RESET is pending.
		 *
		 * Otherwise, immediate notifies take the next open slot.
		 */
		use_ubuffer = 0;
		nackcmd = NULL;
		mutex_enter(&ha->ql_nack_mtx);
		switch (cmd->status) {
		case 0x0E:	/* LIP reset (see LIP_RESET_PENDING below) */
			if (ha->ql_nack != NULL) {
				/*
				 * We're in the sticky situation of receiving
				 * a LIP reset while one is pending. What
				 * appears to work is to drop the old request
				 * and replace it with the new. We send a NACK
				 * for the old to replenish the IOCB.
				 */
				nackcmd = ha->ql_nack;
			}
			ha->ql_nack = cmd;
			break;
		case 0x20:	/* mapped to FC_UB_FCP_ABORT_TASK below */
		case 0x29:	/* mapped to FC_UB_FCP_PORT_LOGOUT below */
		case 0x36:	/* task management flags below */
			/* If this isn't NULL, a LIP RESET is outstanding */
			if (ha->ql_nack == NULL) {
				use_ubuffer++;
			}
			break;
		default:
			EL(ha, "unknown status=%xh\n",
			    cmd->status);
			break;
		}
		mutex_exit(&ha->ql_nack_mtx);

		if (use_ubuffer) {
			/*
			 * Get an unsolicited buffer to send the message up in
			 */

			/* Locate a buffer to use. */
			loop_id = (uint16_t)
			    (CFG_IST(ha, CFG_EXT_FW_INTERFACE) ?
			    CHAR_TO_SHORT(pkt->initiator_id_l,
			    pkt->initiator_id_h) : pkt->initiator_id_h);
			if ((tq = ql_loop_id_to_queue(ha, loop_id)) != NULL) {
				ubp = ql_get_unsolicited_buffer(ha,
				    FC_TYPE_SCSI_FCP);
			}
			if (ubp != NULL) {
				ubp->ub_resp_flags = FC_UB_FCP_CDB_FLAG;
				ubp->ub_resp_token = tq;
				sp = ubp->ub_fca_private;

				fcp = (fcp_cmd_t *)ubp->ub_buffer;

				/* Set FC frame header. */
				ubp->ub_frame.d_id = ha->d_id.b24;
				/* Set 0x06 for R_CTL_COMMAND */
				ubp->ub_frame.r_ctl = R_CTL_COMMAND;
				ubp->ub_frame.s_id = tq->d_id.b24;
				ubp->ub_frame.rsvd = 0;
				ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ |
				    F_CTL_END_SEQ | F_CTL_SEQ_INITIATIVE;
				ubp->ub_frame.type = FC_TYPE_SCSI_FCP;
				ubp->ub_frame.seq_cnt = 0;
				ubp->ub_frame.df_ctl = 0;
				ubp->ub_frame.seq_id = 0;
				ubp->ub_frame.rx_id = cmd->rx_id;
				ubp->ub_frame.ox_id = (uint16_t)ddi_get16(
				    ha->hba_buf.acc_handle, &pkt->ox_id);
				ubp->ub_frame.ro = 0;

				/* Set command in buffer. */
				bzero((void *)fcp, sizeof (fcp_cmd_t));

				/* FCP entity address from the IOCB LUN. */
				lobyte(fcp->fcp_ent_addr.ent_addr_0) =
				    pkt->lun_l;
				hibyte(fcp->fcp_ent_addr.ent_addr_0) =
				    pkt->lun_h;

				/* Translate notify status to FCP content. */
				switch (cmd->status) {
				case 0x29:
					ubp->ub_resp_flags = (uint16_t)
					    (ubp->ub_resp_flags |
					    FC_UB_FCP_PORT_LOGOUT);
					break;
				case 0x20:
					ubp->ub_resp_flags = (uint16_t)
					    (ubp->ub_resp_flags |
					    FC_UB_FCP_ABORT_TASK);
					break;
				case 0x36:
					/* Task management request bits. */
					if (pkt->task_flags_h & BIT_7) {
						fcp->fcp_cntl.cntl_kill_tsk =
						    1;
					}
					if (pkt->task_flags_h & BIT_6) {
						fcp->fcp_cntl.cntl_clr_aca = 1;
					}
					if (pkt->task_flags_h & BIT_5) {
						fcp->fcp_cntl.cntl_reset_tgt =
						    1;
					}
					if (pkt->task_flags_h & BIT_4) {
						fcp->fcp_cntl.cntl_reset_lun =
						    1;
					}
					if (pkt->task_flags_h & BIT_2) {
						fcp->fcp_cntl.cntl_clr_tsk = 1;
					}
					if (pkt->task_flags_h & BIT_1) {
						fcp->fcp_cntl.cntl_abort_tsk =
						    1;
					}
					break;
				default:
					EL(ha, "default, no action\n");
					break;
				}

				QL_UB_LOCK(ha);
				sp->flags |= SRB_UB_CALLBACK | SRB_UB_FCP;
				QL_UB_UNLOCK(ha);
				QL_PRINT_3(CE_CONT, "(%d): Sent Up status = "
				    "%xh\n", ha->instance, cmd->status);
				ql_add_link_b(done_q, &sp->cmd);
			}
		}

		/* NACK and free a LIP reset request displaced above. */
		if (nackcmd) {
			if (ql_req_pkt(ha, (request_t **)&nack) ==
			    QL_SUCCESS) {

				ql_notify_acknowledge_iocb(ha, nackcmd, nack);

				nack->flags_l = 0;
				QL_PRINT_3(CE_CONT, "(%d): send clear "
				    "notify_ack: status=%xh, flag=%xh\n",
				    ha->instance, ddi_get16(
				    ha->hba_buf.acc_handle, &nack->status),
				    nack->flags_l);

				/* Issue command to ISP */
				ql_isp_cmd(ha);
			}
			kmem_free(nackcmd, sizeof (tgt_cmd_t));
		}

		/*
		 * ql_nack can only be non-NULL if we got a LIP RESET and
		 * are processing it. In that case, we don't want to send
		 * a notify acknowledge right now.
		 */
		if (cmd->status != 0x0E) {
			if (ql_req_pkt(ha, (request_t **)&nack) ==
			    QL_SUCCESS) {
				ql_notify_acknowledge_iocb(ha, cmd, nack);

				EL(ha, "send notify_ack: status=%xh "
				    "flag=%xh\n", cmd->status, nack->flags_l);

				/* Issue command to ISP */
				ql_isp_cmd(ha);
			}
			kmem_free(cmd, sizeof (tgt_cmd_t));
		} else {
			/* LIP reset: cmd stays in ha->ql_nack; defer ack. */
			ql_awaken_task_daemon(ha, NULL,
			    LIP_RESET_PENDING, 0);
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_notify_acknowledge_entry
 *	Processes notify acknowledge entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_notify_acknowledge_entry(ql_adapter_state_t *ha,
    notify_acknowledge_entry_t *pkt, ql_head_t *done_q, uint32_t *set_flags,
    uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	uint32_t	index, cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get handle. */
	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
	index = cnt & OSC_INDEX_MASK;

	/* Validate handle. */
	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
	    NULL;

	if (sp != NULL && sp->handle == cnt) {
		/* Remove command from the outstanding array. */
		ha->outstanding_cmds[index] = NULL;
		sp->handle = 0;
		sp->flags &= ~SRB_IN_TOKEN_ARRAY;

		/* Set completion status: firmware status 1 is success. */
		sp->pkt->pkt_reason = ddi_get16(ha->hba_buf.acc_handle,
		    &pkt->status) == 1 ? CS_COMPLETE : CS_PORT_UNAVAILABLE;

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		/* Place command on done queue. */
		ql_add_link_b(done_q, &sp->cmd);

	} else if (cnt != QL_FCA_BRAND) {
		/* QL_FCA_BRAND handles are deliberately not flagged. */
		if (sp == NULL) {
			EL(ha, "unknown IOCB handle=%xh\n", cnt);
		} else {
			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
			    cnt, sp->handle);
		}

		(void) ql_binary_fw_dump(ha, FALSE);

		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
		    ABORT_ISP_ACTIVE))) {
			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_accept_target_io_entry
 *	Processes accept target I/O entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_accept_target_io_entry(ql_adapter_state_t *ha, atio_entry_t *pkt,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	ctio_entry_t	*ctio;
	atio_entry_t	*atio;
	ql_srb_t	*sp;
	fcp_cmd_t	*fcp;
	ql_tgt_t	*tq;
	uint16_t	loop_id;
	fc_unsol_buf_t	*ubp = NULL;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Locate a buffer to use. */
	loop_id = (uint16_t)(CFG_IST(ha, CFG_EXT_FW_INTERFACE) ?
	    CHAR_TO_SHORT(pkt->initiator_id_l, pkt->initiator_id_h) :
	    pkt->initiator_id_h);
	if ((tq = ql_loop_id_to_queue(ha, loop_id)) != NULL) {
		ubp = ql_get_unsolicited_buffer(ha,
		    FC_TYPE_SCSI_FCP);
	}
	if (ubp != NULL) {
		ubp->ub_resp_flags = FC_UB_FCP_CDB_FLAG;
		ubp->ub_resp_token = tq;
		sp = ubp->ub_fca_private;
		fcp = (fcp_cmd_t *)ubp->ub_buffer;

		/* Set header.
		 */
		ubp->ub_frame.d_id = ha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_COMMAND;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ |
		    F_CTL_END_SEQ | F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_SCSI_FCP;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = (uint16_t)ddi_get16(
		    ha->hba_buf.acc_handle, &pkt->rx_id);
		ubp->ub_frame.ox_id = (uint16_t)ddi_get16(
		    ha->hba_buf.acc_handle, &pkt->ox_id);
		ubp->ub_frame.ro = 0;

		/* Set command in buffer. */

		bzero((void *)fcp, sizeof (fcp_cmd_t));

		/* FCP entity address from the ATIO LUN. */
		lobyte(fcp->fcp_ent_addr.ent_addr_0) = pkt->lun_l;
		hibyte(fcp->fcp_ent_addr.ent_addr_0) = pkt->lun_h;

		fcp->fcp_cntl.cntl_qtype = pkt->task_codes;

		/* Data direction from the ATIO execution codes. */
		if (pkt->execution_codes & BIT_1)
			fcp->fcp_cntl.cntl_read_data = 1;
		if (pkt->execution_codes & BIT_0)
			fcp->fcp_cntl.cntl_write_data = 1;

		/* Copy the CDB out of the IOCB. */
		ddi_rep_put8(ha->hba_buf.acc_handle, (uint8_t *)&pkt->cdb[0],
		    (uint8_t *)&fcp->fcp_cdb[0], FCP_CDB_SIZE,
		    DDI_DEV_AUTOINCR);

		fcp->fcp_data_len = (int)ddi_get32(
		    ha->hba_buf.acc_handle, (uint32_t *)&pkt->data_length);
		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_FCP;
		QL_UB_UNLOCK(ha);
		ql_add_link_b(done_q, &sp->cmd);
	}

	/* If command not sent to transport layer. */
	if (ubp == NULL) {
		/* No buffer available: answer the initiator with BUSY. */
		if (ql_req_pkt(ha, (request_t **)&ctio) == QL_SUCCESS) {
			ctio->entry_type = CTIO_TYPE_2;
			ctio->initiator_id_l = pkt->initiator_id_l;
			ctio->initiator_id_h = pkt->initiator_id_h;
			ddi_put16(ha->hba_buf.acc_handle, &ctio->rx_id,
			    (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
			    &pkt->rx_id));
			ctio->flags_l = BIT_7 | BIT_6;
			ctio->flags_h = BIT_7 | BIT_1 | BIT_0;
			ctio->timeout = 0xffff;
			ctio->type.s0_32bit.scsi_status_l = STATUS_BUSY;

			/* Issue command to ISP */
			ql_isp_cmd(ha);
		}
	} else {
		/*
		 * NOTE(review): appears to hand a replacement ATIO (same
		 * rx_id/LUN) back to the firmware -- confirm intent.
		 */
		if (ql_req_pkt(ha, (request_t **)&atio) == QL_SUCCESS) {
			atio->entry_type = ATIO_TYPE;
			atio->initiator_id_l = pkt->initiator_id_l;
			atio->initiator_id_h = pkt->initiator_id_h;
			ddi_put16(ha->hba_buf.acc_handle, &atio->rx_id,
			    (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
			    &pkt->rx_id));
			atio->lun_l = pkt->lun_l;
			atio->lun_h = pkt->lun_h;

			/* Issue command to ISP */
			ql_isp_cmd(ha);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_continue_target_io_entry
 *	Processes continue target IO entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_continue_target_io_entry(ql_adapter_state_t *ha, ctio_entry_t *pkt,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	uint32_t	index, cnt;
	uint16_t	status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get handle. */
	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
	index = cnt & OSC_INDEX_MASK;

	/* Validate handle. */
	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
	    NULL;

	if (sp != NULL && sp->handle == cnt) {
		/* Remove command from the outstanding array. */
		ha->outstanding_cmds[index] = NULL;
		sp->handle = 0;
		sp->flags &= ~SRB_IN_TOKEN_ARRAY;

		/* Set completion status */
		status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt->status);

		/* Map firmware CTIO status to a transport reason code. */
		switch (status) {
		case 1:
			sp->pkt->pkt_reason = CS_COMPLETE;
			break;
		case 2:
			sp->pkt->pkt_reason = CS_ABORTED;
			break;
		case 9:
			sp->pkt->pkt_reason = CS_DATA_OVERRUN;
			break;
		case 0xa:
		case 0xb:
			sp->pkt->pkt_reason = CS_TIMEOUT;
			break;
		case 0xe:
		case 0x17:
			sp->pkt->pkt_reason = CS_RESET;
			break;
		case 0x10:
			sp->pkt->pkt_reason = CS_DMA_ERROR;
			break;
		case 0x15:
		case 0x28:
		case 0x29:
		case 0x2A:
			/* These raw firmware codes pass through unchanged. */
			sp->pkt->pkt_reason = status;
			break;
		default:
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			break;
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		/* Place command on done queue. */
		ql_add_link_b(done_q, &sp->cmd);

	} else if (cnt != QL_FCA_BRAND) {
		/* QL_FCA_BRAND handles are deliberately not flagged. */
		if (sp == NULL) {
			EL(ha, "unknown IOCB handle=%xh\n", cnt);
		} else {
			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
			    cnt, sp->handle);
		}

		(void) ql_binary_fw_dump(ha, FALSE);

		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
		    ABORT_ISP_ACTIVE))) {
			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_entry
 *	Processes received ISP IP entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	uint32_t	index, cnt;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get handle. */
	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
	index = cnt & OSC_INDEX_MASK;

	/* Validate handle. */
	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
	    NULL;

	if (sp != NULL && sp->handle == cnt) {
		/* Remove command from the outstanding array. */
		ha->outstanding_cmds[index] = NULL;
		sp->handle = 0;
		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* 24xx/25xx entries carry status in hdl_status. */
			ip_cmd_entry_t *pkt24 = (ip_cmd_entry_t *)pkt23;

			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt24->hdl_status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt23->comp_status);
		}

		if (ha->task_daemon_flags & LOOP_DOWN) {
			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
			    tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
			    sp->pkt->pkt_reason, tq->d_id.b24,
			    tq->port_down_retry_count);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			/* Logout/unavailable: record it and send a LOGO. */
			if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
			    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
				ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(ha, tq, done_q);
			}

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/* Zero delay: retry immediately. */
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;
		} else {
			if (sp->pkt->pkt_reason != CS_COMPLETE) {
				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
				    sp->pkt->pkt_reason, tq->d_id.b24);
			}
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		ql_add_link_b(done_q, &sp->cmd);

	} else {
		/*
		 * NOTE(review): unlike the sibling handlers, this error
		 * path does not exempt QL_FCA_BRAND handles before forcing
		 * an ISP abort -- confirm whether that is intentional.
		 */
		if (sp == NULL) {
			EL(ha, "unknown IOCB handle=%xh\n", cnt);
		} else {
			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
			    cnt, sp->handle);
		}

		(void) ql_binary_fw_dump(ha, FALSE);

		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
		    ABORT_ISP_ACTIVE))) {
			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_rcv_entry
 *	Processes received ISP IP buffers entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
3129 * reset_flags: task daemon flags to reset. 3130 * 3131 * Context: 3132 * Interrupt or Kernel context, no mailbox commands allowed. 3133 */ 3134 /* ARGSUSED */ 3135 static void 3136 ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt, 3137 ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags) 3138 { 3139 port_id_t s_id; 3140 uint16_t index; 3141 uint8_t cnt; 3142 ql_tgt_t *tq; 3143 3144 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 3145 3146 /* Locate device queue. */ 3147 s_id.b.al_pa = pkt->s_id[0]; 3148 s_id.b.area = pkt->s_id[1]; 3149 s_id.b.domain = pkt->s_id[2]; 3150 if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) { 3151 EL(ha, "Unknown IP device ID=%xh\n", s_id.b24); 3152 return; 3153 } 3154 3155 tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle, 3156 &pkt->seq_length); 3157 tq->ub_total_seg_cnt = pkt->segment_count; 3158 tq->ub_seq_id = ++ha->ub_seq_id; 3159 tq->ub_seq_cnt = 0; 3160 tq->ub_frame_ro = 0; 3161 tq->ub_loop_id = pkt->loop_id; 3162 ha->rcv_dev_q = tq; 3163 3164 for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt < 3165 tq->ub_total_seg_cnt; cnt++) { 3166 3167 index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle, 3168 &pkt->buffer_handle[cnt]); 3169 3170 if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) { 3171 EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n"); 3172 *set_flags |= ISP_ABORT_NEEDED; 3173 break; 3174 } 3175 } 3176 3177 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 3178 } 3179 3180 /* 3181 * ql_ip_rcv_cont_entry 3182 * Processes received ISP IP buffers continuation entry. 3183 * 3184 * Input: 3185 * ha: adapter state pointer. 3186 * pkt: entry pointer. 3187 * done_q: done queue pointer. 3188 * set_flags: task daemon flags to set. 3189 * reset_flags: task daemon flags to reset. 3190 * 3191 * Context: 3192 * Interrupt or Kernel context, no mailbox commands allowed. 
 */
/* ARGSUSED */
static void
ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	uint16_t	index;
	uint8_t		cnt;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Continue the receive sequence started by ql_ip_rcv_entry(). */
	if ((tq = ha->rcv_dev_q) == NULL) {
		EL(ha, "No IP receive device\n");
		return;
	}

	for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
	    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {

		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt->buffer_handle[cnt]);

		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
			break;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_24xx_rcv_entry
 *	Processes received ISP24xx IP buffers entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	port_id_t	s_id;
	uint16_t	index;
	uint8_t		cnt;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Locate device queue. */
	s_id.b.al_pa = pkt->s_id[0];
	s_id.b.area = pkt->s_id[1];
	s_id.b.domain = pkt->s_id[2];
	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
		return;
	}

	/* First entry of a sequence: latch the sequence parameters. */
	if (tq->ub_total_seg_cnt == 0) {
		tq->ub_sequence_length = (uint16_t)ddi_get16(
		    ha->hba_buf.acc_handle, &pkt->seq_length);
		tq->ub_total_seg_cnt = pkt->segment_count;
		tq->ub_seq_id = ++ha->ub_seq_id;
		tq->ub_seq_cnt = 0;
		tq->ub_frame_ro = 0;
		tq->ub_loop_id = (uint16_t)ddi_get16(
		    ha->hba_buf.acc_handle, &pkt->n_port_hdl);
	}

	/* Pass each posted receive buffer up as an unsolicited frame. */
	for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
	    tq->ub_total_seg_cnt; cnt++) {

		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt->buffer_handle[cnt]);

		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
			break;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ms_entry
 *	Processes received Name/Management/CT Pass-Through entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt23:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t		*sp;
	uint32_t		index, cnt;
	ql_tgt_t		*tq;
	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get handle. */
	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
	index = cnt & OSC_INDEX_MASK;

	/* Validate handle.
	 */
	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
	    NULL;

	if (sp != NULL && sp->handle == cnt) {
		if (!(sp->flags & SRB_MS_PKT)) {
			/*
			 * NOTE(review): this early return also skips the
			 * trailing "done" trace below -- confirm intended.
			 */
			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
			    sp->flags);
			*set_flags |= ISP_ABORT_NEEDED;
			return;
		}

		/* Remove command from the outstanding array. */
		ha->outstanding_cmds[index] = NULL;
		sp->handle = 0;
		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* 24xx/25xx status lives in the CT pass-thru form. */
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt24->status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt23->comp_status);
		}

		/* CS_RESOUCE_UNAVAILABLE: project-defined spelling. */
		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
		    sp->retry_count) {
			EL(ha, "Resouce Unavailable Retry = %d\n",
			    sp->retry_count);

			/* Set retry status. */
			sp->retry_count--;
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/* Fixed short delay before retry. */
					ha->port_retry_timer = 2;
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/* Zero delay: retry immediately. */
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (CFG_IST(ha, CFG_CTRL_2425) &&
		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
			/*
			 * Underrun is treated as success as long as at
			 * least a full CT header came back.
			 */
			cnt = ddi_get32(ha->hba_buf.acc_handle,
			    &pkt24->resp_byte_count);
			if (cnt < sizeof (fc_ct_header_t)) {
				EL(ha, "Data underrrun\n");
			} else {
				sp->pkt->pkt_reason = CS_COMPLETE;
			}

		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
		}

		if (sp->pkt->pkt_reason == CS_COMPLETE) {
			/*EMPTY*/
			QL_PRINT_3(CE_CONT, "(%d): resp\n", ha->instance);
			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
		}

		/* For nameserver restore command, management change header. */
		if ((sp->flags & SRB_RETRY) == 0) {
			/*
			 * Ternary used as a statement: d_id 0xfffffc (the
			 * nameserver) swaps the command header, any other
			 * destination swaps the response header.
			 */
			tq->d_id.b24 == 0xfffffc ?
			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
			    sp->pkt->pkt_cmd, B_TRUE) :
			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
			    sp->pkt->pkt_resp, B_TRUE);
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		/* Place command on done queue. */
		ql_add_link_b(done_q, &sp->cmd);

	} else {
		if (sp == NULL) {
			EL(ha, "unknown IOCB handle=%xh\n", cnt);
		} else {
			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
			    cnt, sp->handle);
		}

		(void) ql_binary_fw_dump(ha, FALSE);

		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
		    ABORT_ISP_ACTIVE))) {
			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_report_id_entry
 *	Processes received report ID acquisition entry (virtual port
 *	state notification).
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	EL(ha, "format=%d, vp=%d, status=%d\n",
	    pkt->format, pkt->vp_index, pkt->status);

	if (pkt->format == 1) {
		/* Locate port state structure. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->vp_index == pkt->vp_index) {
				break;
			}
		}
		/* Schedule a loop resync for the affected virtual port. */
		if (vha != NULL && (pkt->status == CS_COMPLETE ||
		    pkt->status == CS_PORT_ID_CHANGE)) {
			*set_flags |= LOOP_RESYNC_NEEDED;
			*reset_flags &= ~LOOP_RESYNC_NEEDED;
			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
			TASK_DAEMON_LOCK(ha);
			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
			vha->task_daemon_flags &= ~LOOP_DOWN;
			TASK_DAEMON_UNLOCK(ha);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}