/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if (cnt == 4 || cnt == 5)
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
	}

	if (ha->mcp) {
		ql_dbg(ql_dbg_async, vha, 0x5000,
		    "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_async, vha, 0x5001,
		    "MBX pointer ERROR.\n");
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN	2
	static char	*link_speeds[] = { "1", "2", "?", "4", "8", "10" };
	char		*link_speed;
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	uint8_t		rscn_queue_index;
	unsigned long	flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_QLA8XXX_TYPE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_log(ql_log_info, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			else if (mb[1] == 0x13)
				link_speed = link_speeds[5];
			ha->link_data_rate = mb[1];
		}

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n", link_speed);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_log(ql_log_info, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA8XXX_TYPE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_log(ql_log_info, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 * Event is global, vp_idx is NOT all vps,
		 * vp_idx does not match
		 * Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it. Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		vha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
		    | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
		rscn_queue_index = vha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != vha->rscn_out_ptr) {
			vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
			vha->rscn_in_ptr = rscn_queue_index;
		} else {
			vha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&vha->loop_state, LOOP_UPDATE);
		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_COMPLETE:
	case MBA_IDC_NOTIFY:
	case MBA_IDC_TIME_EXT:
		qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	}

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
	struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;
		qla2x00_sp_compl(ha, sp);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n",
		    type, fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
		    type, fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (ctx->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
	    type, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
	    le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	lio->done(sp);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_ctx *sp_bsg;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	sp_bsg = sp->ctx;
	bsg_job = sp_bsg->u.bsg_job;

	type = NULL;
	switch (sp_bsg->type) {
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_log(ql_log_warn, vha, 0x5047,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
		return;
	}

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			bsg_job->reply->result = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			bsg_job->reply->result = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
		kfree(sp->fcport);

	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	bsg_job->job_done(bsg_job);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_ctx *sp_bsg;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	sp_bsg = sp->ctx;
	bsg_job = sp_bsg->u.bsg_job;

	type = NULL;
	switch (sp_bsg->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_log(ql_log_warn, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			bsg_job->reply->result = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_log(ql_log_info, vha, 0x503f,
			    "ELS-CT pass-through-%s error comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_log(ql_log_info, vha, 0x5040,
			    "ELS-CT pass-through-%s error comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			bsg_job->reply->result = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
	    (sp_bsg->type == SRB_CT_CMD))
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	bsg_job->job_done(bsg_job);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, vha, 0x5034,
		    "Async-%s error entry - "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5036,
		    "Async-%s complete - portid=%02x%02x%02x "
		    "iop0=%x.\n",
		    type, fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (ctx->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, vha, 0x5037,
	    "Async-%s failed - portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n",
	    type, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	lio->done(sp);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct srb_ctx *ctx;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	ctx = sp->ctx;
	iocb = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, vha, 0x5038,
		    "Async-%s error - entry-status(%x).\n",
		    type, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, vha, 0x5039,
		    "Async-%s error - completion status(%x).\n",
		    type, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, vha, 0x503a,
		    "Async-%s error - no response info(%x).\n",
		    type, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, vha, 0x503b,
		    "Async-%s error - not enough response(%d).\n",
		    type, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, vha, 0x503c,
		    "Async-%s error - response(%x).\n",
		    type, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	iocb->done(sp);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t	handle_cnt;
	uint16_t	cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			ql_log(ql_log_warn, vha, 0x5035,
			    "Process error entry.\n");

			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = sp->cmd;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	sp->request_sense_length = sense_len;
	sp->request_sense_ptr = cp->sense_buffer;
	if (sp->request_sense_length > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	sp->request_sense_ptr += sense_len;
	sp->request_sense_length -= sense_len;
	if (sp->request_sense_length != 0)
		rsp->status_srb = sp;

	ql_dbg(ql_dbg_io, vha, 0x301c,
	    "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
	    sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
	    cp->device->lun, cp);
	if (sense_len)
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
}

struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline void
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_dif_tuple	*ep =
	    (struct scsi_dif_tuple *)&sts24->data[20];
	struct scsi_dif_tuple	*ap =
	    (struct scsi_dif_tuple *)&sts24->data[12];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	e_ref_tag = be32_to_cpu(ep->ref_tag);
	a_ref_tag = be32_to_cpu(ap->ref_tag);
	e_app_tag = be16_to_cpu(ep->app_tag);
	a_app_tag = be16_to_cpu(ap->app_tag);
	e_guard = be16_to_cpu(ep->guard);
	a_guard = be16_to_cpu(ap->guard);

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return;
	}
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS) {
		sp = req->outstanding_cmds[handle];
		req->outstanding_cmds[handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		ql_log(ql_log_warn, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}
	cp = sp->cmd;
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_log(ql_log_warn, vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			cp->result = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			cp->result = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_log(ql_log_warn, vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}
		}
		cp->result = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_log(ql_log_warn, vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_log(ql_log_warn, vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				cp->result = DID_ERROR << 16 | lscsi_status;
				break;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_log(ql_log_warn, vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}
		} else {
			ql_log(ql_log_warn, vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid, scsi_bufflen(cp));

			cp->result = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		}

		cp->result = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_log(ql_log_warn, vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		cp->result = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, vha, 0x3021,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		cp->result = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		qla2x00_handle_dif_error(sp, sts24);
		break;
	default:
		cp->result = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
		    cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (rsp->status_srb == NULL)
		qla2x00_sp_compl(ha, sp);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;

	if (sp != NULL && sp->request_sense_length != 0) {
		cp = sp->cmd;
		if (cp == NULL) {
			ql_log(ql_log_warn, vha, 0x3025,
			    "cmd is NULL: already returned to OS (sp=%p).\n",
			    sp);

			rsp->status_srb = NULL;
			return;
		}

		if (sp->request_sense_length > sizeof(pkt->data)) {
			sense_sz = sizeof(pkt->data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/* Move sense data. */
		if (IS_FWI2_CAPABLE(ha))
			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
		    sp->request_sense_ptr, sense_sz);

		sp->request_sense_ptr += sense_sz;
		sp->request_sense_length -= sense_sz;

		/* Place command on done queue. */
		if (sp->request_sense_length == 0) {
			rsp->status_srb = NULL;
			qla2x00_sp_compl(ha, sp);
		}
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle = LSW(pkt->handle);
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = ha->req_q_map[que];

	if (pkt->entry_status & RF_INV_E_ORDER)
		ql_dbg(ql_dbg_async, vha, 0x502a,
		    "Invalid Entry Order.\n");
	else if (pkt->entry_status & RF_INV_E_COUNT)
		ql_dbg(ql_dbg_async, vha, 0x502b,
		    "Invalid Entry Count.\n");
	else if (pkt->entry_status & RF_INV_E_PARAM)
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "Invalid Entry Parameter.\n");
	else if (pkt->entry_status & RF_INV_E_TYPE)
		ql_dbg(ql_dbg_async, vha, 0x502d,
		    "Invalid Entry Type.\n");
	else if (pkt->entry_status & RF_BUSY)
		ql_dbg(ql_dbg_async, vha, 0x502e,
		    "Busy.\n");
	else
		ql_dbg(ql_dbg_async, vha, 0x502f,
		    "UNKNOWN flag error.\n");

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS)
		sp = req->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[handle] = NULL;

		/* Bad payload or header */
		if (pkt->entry_status &
		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
		     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
			sp->cmd->result = DID_ERROR << 16;
		} else if (pkt->entry_status & RF_BUSY) {
			sp->cmd->result = DID_BUS_BUSY << 16;
		} else {
			sp->cmd->result = DID_ERROR << 16;
		}
		qla2x00_sp_compl(ha, sp);

	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
	    || pkt->entry_type == COMMAND_TYPE_6) {
		ql_log(ql_log_warn, vha, 0x5030,
		    "Error entry - invalid handle.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}

	if (ha->mcp) {
		ql_dbg(ql_dbg_async, vha, 0x504d,
		    "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_async, vha, 0x504e,
		    "MBX pointer ERROR.\n");
	}
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			ql_dbg(ql_dbg_async, vha, 0x5029,
			    "Process error entry.\n");

			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case TSK_MGMT_IOCB_TYPE:
			qla24xx_tm_iocb_entry(vha, rsp->req,
			    (struct tsk_mgmt_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_QLA82XX(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

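/*
 * Note on ISP24xx host status decoding (a sketch, for illustration only):
 * the 32-bit host_status register packs the interrupt reason in its low
 * byte and mailbox 0 (or a 16-bit completion handle) in its upper word.
 * The handlers below key off it roughly as:
 *
 *	stat = RD_REG_DWORD(&reg->host_status);
 *	switch (stat & 0xff) {
 *	case 0x1, 0x2, 0x10, 0x11:	mailbox complete, mb0 = MSW(stat)
 *	case 0x12:			asynchronous event, mb0 = MSW(stat)
 *	case 0x13, 0x14:		response queue update
 *	}
 */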
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

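/*
 * Note: the multiqueue response handler below does not drain the ring in
 * interrupt context.  It only acks the interrupt (when handshaking is
 * enabled for the queue) and then defers processing to the per-queue work
 * item, roughly:
 *
 *	queue_work_on((int)(rsp->id - 1), ha->wq, &rsp->q_work);
 */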
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	/* Clear the interrupt, if enabled, for this response queue */
	if (rsp->options & ~BIT_6) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
	return IRQ_HANDLED;
}

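/*
 * MSI-X vector layout used by the tables below: entry 0 is the "default"
 * vector (mailbox completions, async events and the base response queue),
 * entry 1 services the base response queue directly, and entry 2 is the
 * "multiq" handler claimed later by qla25xx_request_irq() for additional
 * response queues.
 */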
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq)
			free_irq(qentry->vector, qentry->rsp);
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
			GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (ret) {
		if (ret < MIN_MSIX_COUNT)
			goto msix_failed;

		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "-- %d/%d; retrying with %d vectors.\n",
		    ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
		if (ret) {
msix_failed:
			ql_log(ql_log_fatal, vha, 0x00c7,
			    "MSI-X: Failed to enable support, "
			    "giving up -- %d/%d.\n",
			    ha->msix_count, ret);
			goto msix_out;
		}
		ha->max_rsp_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
				ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < 2; i++) {
		qentry = &ha->msix_entries[i];
		if (IS_QLA82XX(ha)) {
			ret = request_irq(qentry->vector,
				qla82xx_msix_entries[i].handler,
				0, qla82xx_msix_entries[i].name, rsp);
		} else {
			ret = request_irq(qentry->vector,
				msix_entries[i].handler,
				0, msix_entries[i].name, rsp);
		}
		if (ret) {
			ql_log(ql_log_fatal, vha, 0x00cb,
			    "MSI-X: unable to register handler -- %x/%d.\n",
			    qentry->vector, ret);
			qla24xx_disable_msix(ha);
			ha->mqenable = 0;
			goto msix_out;
		}
		qentry->have_irq = 1;
		qentry->rsp = rsp;
		rsp->msix = qentry;
	}

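	/*
	 * Only the first two vectors are claimed here; any remaining
	 * vectors are left for additional response queues, which request
	 * them later through qla25xx_request_irq() (see below).
	 */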
	/* Enable MSI-X vector for response queue update for queue 0 */
	if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
		ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret;
	device_reg_t __iomem *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, ha->fw_attributes);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}
	ql_log(ql_log_info, vha, 0x0037,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "MSI: Falling back to INTa mode -- %d.\n", ret);
skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	}

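	/*
	 * With an interrupt vector in place, clear any RISC/host interrupt
	 * state still latched in the ISP; this step is skipped below for
	 * ISP81xx/ISP82xx.
	 */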
clear_risc_ints:

	/*
	 * FIXME: Noted that 8014s were being dropped during NK testing.
	 * Timing deltas during MSI-X/INTa transitions?
	 */
	if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
		goto fail;
	spin_lock_irq(&ha->hardware_lock);
	if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
	} else {
		WRT_REG_WORD(&reg->isp.semaphore, 0);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
	}
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}


int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->rsp = rsp;
	return ret;
}