/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright 2009 QLogic Corporation */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"Copyright 2009 QLogic Corporation; ql_iocb.c"

/*
 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
 *
 * ***********************************************************************
 * *									**
 * *				NOTICE					**
 * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
 * *			ALL RIGHTS RESERVED				**
 * *									**
 * ***********************************************************************
 *
 */

#include <ql_apps.h>
#include <ql_api.h>
#include <ql_debug.h>
#include <ql_iocb.h>
#include <ql_isr.h>
#include <ql_xioctl.h>

/*
 * Local Function Prototypes.
 */
static int ql_req_pkt(ql_adapter_state_t *, request_t **);
static void ql_continuation_iocb(ql_adapter_state_t *, ddi_dma_cookie_t *,
    uint16_t, boolean_t);
static void ql_isp24xx_rcvbuf(ql_adapter_state_t *);

/*
 * ql_start_iocb
 *	The start IOCB is responsible for building request packets
 *	on request ring and modifying ISP input pointer.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_start_iocb(ql_adapter_state_t *vha, ql_srb_t *sp)
{
	ql_link_t		*link;
	request_t		*pkt;
	uint64_t		*ptr64;
	uint32_t		cnt;
	/* All ring state lives on the physical adapter, not the vport. */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire ring lock. */
	REQUEST_RING_LOCK(ha);

	if (sp != NULL) {
		/*
		 * If the pending queue is not empty maintain order
		 * by putting this srb at the tail and getting the head.
		 */
		if ((link = ha->pending_cmds.first) != NULL) {
			ql_add_link_b(&ha->pending_cmds, &sp->cmd);
			/* Remove command from pending command queue */
			sp = link->base_address;
			ql_remove_link(&ha->pending_cmds, &sp->cmd);
		}
	} else {
		/* Get command from pending command queue if not empty. */
		if ((link = ha->pending_cmds.first) == NULL) {
			/* Release ring specific lock */
			REQUEST_RING_UNLOCK(ha);
			QL_PRINT_3(CE_CONT, "(%d): empty done\n",
			    ha->instance);
			return;
		}
		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* start this request and as many others as possible */
	for (;;) {
		if (ha->req_q_cnt < sp->req_cnt) {
			/*
			 * Calculate number of free request entries from the
			 * chip's current ring-out index.
			 */
			cnt = RD16_IO_REG(ha, req_out);
			if (ha->req_ring_index < cnt) {
				ha->req_q_cnt = (uint16_t)
				    (cnt - ha->req_ring_index);
			} else {
				ha->req_q_cnt = (uint16_t)(REQUEST_ENTRY_CNT -
				    (ha->req_ring_index - cnt));
			}
			/*
			 * Keep one slot in reserve so the in-pointer never
			 * catches the out-pointer (full looks like empty).
			 */
			if (ha->req_q_cnt != 0) {
				ha->req_q_cnt--;
			}

			/*
			 * If no room in request ring put this srb at
			 * the head of the pending queue and exit.
			 */
			if (ha->req_q_cnt < sp->req_cnt) {
				QL_PRINT_8(CE_CONT, "(%d): request ring full,"
				    " req_q_cnt=%d, req_ring_index=%d\n",
				    ha->instance, ha->req_q_cnt,
				    ha->req_ring_index);
				ql_add_link_t(&ha->pending_cmds, &sp->cmd);
				break;
			}
		}

		/*
		 * Check for room in outstanding command list; slot 0 is
		 * never used, the scan wraps through 1..MAX-1.
		 */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			ha->osc_index++;
			if (ha->osc_index == MAX_OUTSTANDING_COMMANDS) {
				ha->osc_index = 1;
			}
			if (ha->outstanding_cmds[ha->osc_index] == NULL) {
				break;
			}
		}
		/*
		 * If no room in outstanding array put this srb at
		 * the head of the pending queue and exit.
		 */
		if (cnt == MAX_OUTSTANDING_COMMANDS) {
			QL_PRINT_8(CE_CONT, "(%d): no room in outstanding "
			    "array\n", ha->instance);
			ql_add_link_t(&ha->pending_cmds, &sp->cmd);
			break;
		}

		/* nothing to stop us now. */
		ha->outstanding_cmds[ha->osc_index] = sp;
		/*
		 * Create and save a unique response identifier in the srb:
		 * high bits carry the rolling command count, low bits the
		 * outstanding-array index.
		 */
		sp->handle = ha->adapter_stats->ncmds << OSC_INDEX_SHIFT |
		    ha->osc_index;
		ha->req_q_cnt -= sp->req_cnt;

		/* build the iocb in the request ring */
		pkt = ha->request_ring_ptr;
		sp->flags |= SRB_IN_TOKEN_ARRAY;

		/* Zero out packet (one request entry = 64 bytes). */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/* Setup IOCB common data. */
		pkt->entry_count = (uint8_t)sp->req_cnt;
		pkt->sys_define = (uint8_t)ha->req_ring_index;
		/* mark the iocb with the response identifier */
		ddi_put32(ha->hba_buf.acc_handle, &pkt->handle,
		    (uint32_t)sp->handle);

		/* Setup IOCB unique data via the srb's build callback. */
		(sp->iocb)(vha, sp, pkt);

		sp->flags |= SRB_ISP_STARTED;

		QL_PRINT_5(CE_CONT, "(%d,%d): req packet, sp=%p\n",
		    ha->instance, vha->vp_index, (void *)sp);
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);

		/* Sync DMA buffer before the chip may read the entry. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
		    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		/* Adjust ring index. */
		ha->req_ring_index++;
		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
			ha->req_ring_index = 0;
			ha->request_ring_ptr = ha->request_ring_bp;
		} else {
			ha->request_ring_ptr++;
		}

		/* Reset watchdog timer */
		sp->wdg_q_time = sp->init_wdg_q_time;

		/*
		 * Send it by setting the new ring index in the ISP Request
		 * Ring In Pointer register.  This is the mechanism
		 * used to notify the isp that a new iocb has been
		 * placed on the request ring.
		 */
		WRT16_IO_REG(ha, req_in, ha->req_ring_index);

		/* Update outstanding command count statistic. */
		ha->adapter_stats->ncmds++;

		/* if there is a pending command, try to start it. */
		if ((link = ha->pending_cmds.first) == NULL) {
			break;
		}

		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* Release ring specific lock */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_req_pkt
 *	Function is responsible for locking ring and
 *	getting a zeroed out request packet.
 *
 *	On QL_SUCCESS the request ring lock is HELD on return; the caller
 *	must release it (normally via ql_isp_cmd()).
 *
 * Input:
 *	ha:	adapter state pointer.
 *	pktp:	address for packet pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static int
ql_req_pkt(ql_adapter_state_t *vha, request_t **pktp)
{
	uint16_t		cnt;
	uint32_t		*long_ptr;
	uint32_t		timer;
	int			rval = QL_FUNCTION_TIMEOUT;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for 30 seconds for slot (30000 x 1ms polls). */
	for (timer = 30000; timer != 0; timer--) {
		/* Acquire ring lock. */
		REQUEST_RING_LOCK(ha);

		if (ha->req_q_cnt == 0) {
			/* Calculate number of free request entries. */
			cnt = RD16_IO_REG(ha, req_out);
			if (ha->req_ring_index < cnt) {
				ha->req_q_cnt = (uint16_t)
				    (cnt - ha->req_ring_index);
			} else {
				ha->req_q_cnt = (uint16_t)
				    (REQUEST_ENTRY_CNT -
				    (ha->req_ring_index - cnt));
			}
			/* Reserve one slot; full must not look empty. */
			if (ha->req_q_cnt != 0) {
				ha->req_q_cnt--;
			}
		}

		/* Found empty request ring slot? */
		if (ha->req_q_cnt != 0) {
			ha->req_q_cnt--;
			*pktp = ha->request_ring_ptr;

			/* Zero out packet. */
			long_ptr = (uint32_t *)ha->request_ring_ptr;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++) {
				*long_ptr++ = 0;
			}

			/* Setup IOCB common data. */
			ha->request_ring_ptr->entry_count = 1;
			ha->request_ring_ptr->sys_define =
			    (uint8_t)ha->req_ring_index;
			ddi_put32(ha->hba_buf.acc_handle,
			    &ha->request_ring_ptr->handle,
			    (uint32_t)QL_FCA_BRAND);

			rval = QL_SUCCESS;

			/* NOTE: ring lock intentionally stays held. */
			break;
		}

		/* Release request queue lock. */
		REQUEST_RING_UNLOCK(ha);

		drv_usecwait(MILLISEC);

		/* Check for pending interrupts. */
		/*
		 * XXX protect interrupt routine from calling itself.
		 * Need to revisit this routine. So far we never
		 * hit this case as req slot was available
		 */
		if ((!(curthread->t_flag & T_INTR_THREAD)) &&
		    (RD16_IO_REG(ha, istatus) & RISC_INT)) {
			(void) ql_isr((caddr_t)ha);
			INTR_LOCK(ha);
			ha->intr_claimed = TRUE;
			INTR_UNLOCK(ha);
		}
	}

	if (rval != QL_SUCCESS) {
		/* No slot in 30s: request a full ISP abort/recovery. */
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
		EL(ha, "failed, rval = %xh, isp_abort_needed\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_isp_cmd
 *	Function is responsible for modifying ISP input pointer.
 *	This action notifies the isp that a new request has been
 *	added to the request ring.
 *
 *	Releases ring lock (acquired earlier by ql_req_pkt()).
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_isp_cmd(ql_adapter_state_t *vha)
{
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_PRINT_5(CE_CONT, "(%d): req packet:\n", ha->instance);
	QL_DUMP_5((uint8_t *)ha->request_ring_ptr, 8, REQUEST_ENTRY_SIZE);

	/* Sync DMA buffer before handing the entry to the chip. */
	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
	    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
	    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
	    DDI_DMA_SYNC_FORDEV);

	/* Adjust ring index, wrapping at end of ring. */
	ha->req_ring_index++;
	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring_bp;
	} else {
		ha->request_ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT16_IO_REG(ha, req_in, ha->req_ring_index);

	/* Release ring lock. */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_command_iocb
 *	Setup of command IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_command_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set LUN number */
	pkt->lun_l = LSB(sp->lun_queue->lun_no);
	pkt->lun_h = MSB(sp->lun_queue->lun_no);

	/* Set target ID (wide loop IDs only with extended FW interface). */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->target_l = LSB(tq->loop_id);
		pkt->target_h = MSB(tq->loop_id);
	} else {
		pkt->target_h = LSB(tq->loop_id);
	}

	/* Set tag queue control flags */
	if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_HTAG);
	} else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_OTAG);
	/* else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) */
	} else {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_STAG);
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Load SCSI CDB */
	ddi_rep_put8(ha->hba_buf.acc_handle, fcp->fcp_cdb,
	    pkt->scsi_cdb, MAX_CMDSZ, DDI_DEV_AUTOINCR);

	/* Type 3 entries carry 64-bit DMA addresses, type 2 only 32-bit. */
	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
		pkt->entry_type = IOCB_CMD_TYPE_3;
		cnt = CMD_TYPE_3_DATA_SEGMENTS;
	} else {
		pkt->entry_type = IOCB_CMD_TYPE_2;
		cnt = CMD_TYPE_2_DATA_SEGMENTS;
	}

	if (fcp->fcp_data_len == 0) {
		/* No data phase; count it as a control request and exit. */
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->IOControlRequests++;
		return;
	}

	/*
	 * Set transfer direction. Load Data segments.
	 */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_OUT);
		ha->xioctl->IOOutputRequests++;
		ha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_IN);
		ha->xioctl->IOInputRequests++;
		ha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count, fcp->fcp_data_len);

	/* Load command data segments (as many as fit in this entry). */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_data_cookie;
	while (cnt && seg_cnt) {
		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* High 32 bits of the 64-bit DMA address. */
			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
			    cp->dmac_notused);
		}
		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
		    (uint32_t)cp->dmac_size);
		seg_cnt--;
		cnt--;
		cp++;
	}

	/*
	 * Build continuation packets for any remaining segments.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt,
		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_continuation_iocb
 *	Setup of continuation IOCB.
 *
 *	Caller must hold the request ring lock; this walks and advances
 *	the ring directly.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	cp:		cookie list pointer.
 *	seg_cnt:	number of segments.
 *	addr64:		64 bit addresses.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_continuation_iocb(ql_adapter_state_t *ha, ddi_dma_cookie_t *cp,
    uint16_t seg_cnt, boolean_t addr64)
{
	cont_entry_t	*pkt;
	uint64_t	*ptr64;
	uint32_t	*ptr32, cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Build continuation packets.
	 */
	while (seg_cnt) {
		/* Sync the current (previous) entry before moving on. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
		    REQUEST_Q_BUFFER_OFFSET), REQUEST_ENTRY_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		/* Adjust ring pointer, and deal with wrap. */
		ha->req_ring_index++;
		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
			ha->req_ring_index = 0;
			ha->request_ring_ptr = ha->request_ring_bp;
		} else {
			ha->request_ring_ptr++;
		}
		pkt = (cont_entry_t *)ha->request_ring_ptr;

		/* Zero out packet. */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/*
		 * Build continuation packet.
		 */
		pkt->entry_count = 1;
		pkt->sys_define = (uint8_t)ha->req_ring_index;
		if (addr64) {
			/* Type 1: address/address-high/length triplets. */
			pkt->entry_type = CONTINUATION_TYPE_1;
			cnt = CONT_TYPE_1_DATA_SEGMENTS;
			ptr32 = (uint32_t *)
			    &((cont_type_1_entry_t *)pkt)->dseg_0_address;
			while (cnt && seg_cnt) {
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_notused);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		} else {
			/* Type 0: 32-bit address/length pairs. */
			pkt->entry_type = CONTINUATION_TYPE_0;
			cnt = CONT_TYPE_0_DATA_SEGMENTS;
			ptr32 = (uint32_t *)&pkt->dseg_0_address;
			while (cnt && seg_cnt) {
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		}

		QL_PRINT_5(CE_CONT, "(%d): packet:\n", ha->instance);
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_command_24xx_iocb
 *	Setup of ISP24xx command IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_command_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd_24xx_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	pkt->entry_type = IOCB_CMD_TYPE_7;

	/* Set LUN number */
	pkt->fcp_lun[2] = LSB(sp->lun_queue->lun_no);
	pkt->fcp_lun[3] = MSB(sp->lun_queue->lun_no);

	/* Set N_port handle */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	/* Set target ID (24-bit D_ID, little-end first). */
	pkt->target_id[0] = tq->d_id.b.al_pa;
	pkt->target_id[1] = tq->d_id.b.area;
	pkt->target_id[2] = tq->d_id.b.domain;

	pkt->vp_index = ha->vp_index;

	/*
	 * Set ISP command timeout; values >= 0x1999 leave the field zero
	 * (no firmware timeout).
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Load SCSI CDB, then byte-swap each 32-bit word for the 24xx. */
	ddi_rep_put8(pha->hba_buf.acc_handle, fcp->fcp_cdb, pkt->scsi_cdb,
	    MAX_CMDSZ, DDI_DEV_AUTOINCR);
	for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
		ql_chg_endian((uint8_t *)&pkt->scsi_cdb + cnt, 4);
	}

	/*
	 * Set tag queue control flags
	 * Note:
	 *	Cannot copy fcp->fcp_cntl.cntl_qtype directly,
	 *	problem with x86 in 32bit kernel mode
	 */
	switch (fcp->fcp_cntl.cntl_qtype) {
	case FCP_QTYPE_SIMPLE:
		pkt->task = TA_STAG;
		break;
	case FCP_QTYPE_HEAD_OF_Q:
		pkt->task = TA_HTAG;
		break;
	case FCP_QTYPE_ORDERED:
		pkt->task = TA_OTAG;
		break;
	case FCP_QTYPE_ACA_Q_TAG:
		pkt->task = TA_ACA;
		break;
	case FCP_QTYPE_UNTAGGED:
		pkt->task = TA_UNTAGGED;
		break;
	default:
		break;
	}

	if (fcp->fcp_data_len == 0) {
		/* No data phase; count it as a control request and exit. */
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		pha->xioctl->IOControlRequests++;
		return;
	}

	/* Set transfer direction. */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags = CF_WR;
		pha->xioctl->IOOutputRequests++;
		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags = CF_RD;
		pha->xioctl->IOInputRequests++;
		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(pha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->total_byte_count,
	    fcp->fcp_data_len);

	/* Load command data segment (type 7 entry holds exactly one). */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_data_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for the remaining segments.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_marker
 *	Function issues marker IOCB.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	loop_id:	device loop ID
 *	lun:		device LUN
 *	type:		marker modifier
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
int
ql_marker(ql_adapter_state_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t	*pkt;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* On success the request ring lock is held until ql_isp_cmd(). */
	rval = ql_req_pkt(ha, (request_t **)&pkt);
	if (rval == QL_SUCCESS) {
		pkt->entry_type = MARKER_TYPE;

		if (CFG_IST(ha, CFG_CTRL_242581)) {
			/* ISP24xx-family marker layout. */
			marker_24xx_entry_t	*pkt24 =
			    (marker_24xx_entry_t *)pkt;

			pkt24->modifier = type;

			/* Set LUN number */
			pkt24->fcp_lun[2] = LSB(lun);
			pkt24->fcp_lun[3] = MSB(lun);

			pkt24->vp_index = ha->vp_index;

			/* Set N_port handle */
			ddi_put16(ha->pha->hba_buf.acc_handle,
			    &pkt24->n_port_hdl, loop_id);

		} else {
			/* Legacy ISP2xxx marker layout. */
			pkt->modifier = type;

			pkt->lun_l = LSB(lun);
			pkt->lun_h = MSB(lun);

			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->target_l = LSB(loop_id);
				pkt->target_h = MSB(loop_id);
			} else {
				pkt->target_h = LSB(loop_id);
			}
		}

		/* Issue command to ISP (also drops the ring lock). */
		ql_isp_cmd(ha);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_ms_iocb
 *	Setup of name/management server IOCB.
 *
 * Input:
 *	ha = adapter state pointer.
 *	sp = srb structure pointer.
 *	arg = request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ms_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ms_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
	/*
	 * Build command packet.
	 */
	pkt->entry_type = MS_TYPE;

	/* Set loop ID */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->loop_id_l = LSB(tq->loop_id);
		pkt->loop_id_h = MSB(tq->loop_id);
	} else {
		pkt->loop_id_h = LSB(tq->loop_id);
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Set cmd data segment count. */
	pkt->cmd_dseg_count_l = 1;

	/* Set total data segment count (1 cmd + response cookies). */
	seg_cnt = (uint16_t)(sp->pkt->pkt_resp_cookie_cnt + 1);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->total_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/* Load MS command data segments. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
	seg_cnt--;

	/* Load MS response entry data segments. */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for remaining response cookies.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ms_24xx_iocb
 *	Setup of name/management server IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ms_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ct_passthru_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
	/*
	 * Build command packet.
	 */
	pkt->entry_type = CT_PASSTHRU_TYPE;

	/* Set loop ID */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	pkt->vp_index = ha->vp_index;

	/*
	 * Set ISP command timeout; values >= 0x1999 leave the field zero
	 * (no firmware timeout).
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Set cmd/response data segment counts. */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->cmd_dseg_count, 1);
	seg_cnt = (uint16_t)sp->pkt->pkt_resp_cookie_cnt;
	ddi_put16(pha->hba_buf.acc_handle, &pkt->resp_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/* Load MS command entry data segments. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);

	/* Load MS response entry data segments. */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for remaining response cookies.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_iocb
 *	Setup of IP IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ip_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ip_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set loop ID */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->loop_id_l = LSB(tq->loop_id);
		pkt->loop_id_h = MSB(tq->loop_id);
	} else {
		pkt->loop_id_h = LSB(tq->loop_id);
	}

	/* Set control flags */
	pkt->control_flags_l = BIT_6;
	if (sp->pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
		pkt->control_flags_h = BIT_7;
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/*
	 * Build command packet; A64 entries carry 64-bit DMA addresses.
	 */
	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
		pkt->entry_type = IP_A64_TYPE;
		cnt = IP_A64_DATA_SEGMENTS;
	} else {
		pkt->entry_type = IP_TYPE;
		cnt = IP_DATA_SEGMENTS;
	}

	/* Load command entry data segments. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	while (cnt && seg_cnt) {
		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* High 32 bits of the 64-bit DMA address. */
			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
			    cp->dmac_notused);
		}
		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
		    (uint32_t)cp->dmac_size);
		seg_cnt--;
		cnt--;
		cp++;
	}

	/*
	 * Build continuation packets for any remaining segments.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt,
		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_24xx_iocb
 *	Setup of IP IOCB for ISP24xx.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ip_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ip_cmd_entry_t		*pkt = arg;

	pkt->entry_type = IP_CMD_TYPE;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set N_port handle */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->hdl_status, tq->loop_id);

	/*
	 * Set ISP command timeout; values >= 0x1999 leave the field zero
	 * (no firmware timeout).
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout_hdl,
		    sp->isp_timeout);
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Set control flags */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->control_flags,
	    (uint16_t)(BIT_0));

	/* Set frame header control flags */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->frame_hdr_cntrl_flgs,
	    (uint16_t)(IPCF_LAST_SEQ | IPCF_FIRST_SEQ));

	/* Load command data segment (entry holds exactly one). */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for the remaining segments.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_isp_rcvbuf
 *	Locates free buffers and places it on the receive buffer queue.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_isp_rcvbuf(ql_adapter_state_t *ha)
{
	rcvbuf_t	*container;
	uint16_t	rcv_q_cnt;
	uint16_t	index = 0;
	uint16_t	index1 = 1;
	int		debounce_count = QL_MAX_DEBOUNCE;
	ql_srb_t	*sp;
	fc_unsol_buf_t	*ubp;
	int		ring_updated = FALSE;

	/* ISP24xx-family adapters use a different mechanism. */
	if (CFG_IST(ha, CFG_CTRL_242581)) {
		ql_isp24xx_rcvbuf(ha);
		return;
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Calculate number of free receive buffer entries.
*/ 1142 index = RD16_IO_REG(ha, mailbox[8]); 1143 do { 1144 index1 = RD16_IO_REG(ha, mailbox[8]); 1145 if (index1 == index) { 1146 break; 1147 } else { 1148 index = index1; 1149 } 1150 } while (debounce_count --); 1151 1152 if (debounce_count < 0) { 1153 /* This should never happen */ 1154 EL(ha, "max mb8 debounce retries exceeded\n"); 1155 } 1156 1157 rcv_q_cnt = (uint16_t)(ha->rcvbuf_ring_index < index ? 1158 index - ha->rcvbuf_ring_index : RCVBUF_CONTAINER_CNT - 1159 (ha->rcvbuf_ring_index - index)); 1160 1161 if (rcv_q_cnt == RCVBUF_CONTAINER_CNT) { 1162 rcv_q_cnt--; 1163 } 1164 1165 /* Load all free buffers in ISP receive buffer ring. */ 1166 index = 0; 1167 while (rcv_q_cnt > (uint16_t)0 && index < QL_UB_LIMIT) { 1168 /* Locate a buffer to give. */ 1169 QL_UB_LOCK(ha); 1170 while (index < QL_UB_LIMIT) { 1171 ubp = ha->ub_array[index]; 1172 if (ubp != NULL) { 1173 sp = ubp->ub_fca_private; 1174 if ((sp->ub_type == FC_TYPE_IS8802_SNAP) && 1175 (ha->flags & IP_INITIALIZED) && 1176 (sp->flags & SRB_UB_IN_FCA) && 1177 (!(sp->flags & (SRB_UB_IN_ISP | 1178 SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK | 1179 SRB_UB_ACQUIRED)))) { 1180 sp->flags |= SRB_UB_IN_ISP; 1181 break; 1182 } 1183 } 1184 index++; 1185 } 1186 1187 if (index < QL_UB_LIMIT) { 1188 rcv_q_cnt--; 1189 index++; 1190 container = ha->rcvbuf_ring_ptr; 1191 1192 /* 1193 * Build container. 1194 */ 1195 ddi_put32(ha->hba_buf.acc_handle, 1196 (uint32_t *)(void *)&container->bufp[0], 1197 sp->ub_buffer.cookie.dmac_address); 1198 1199 ddi_put32(ha->hba_buf.acc_handle, 1200 (uint32_t *)(void *)&container->bufp[1], 1201 sp->ub_buffer.cookie.dmac_notused); 1202 1203 ddi_put16(ha->hba_buf.acc_handle, &container->handle, 1204 LSW(sp->handle)); 1205 1206 ha->ub_outcnt++; 1207 1208 /* Adjust ring index. 
*/ 1209 ha->rcvbuf_ring_index++; 1210 if (ha->rcvbuf_ring_index == RCVBUF_CONTAINER_CNT) { 1211 ha->rcvbuf_ring_index = 0; 1212 ha->rcvbuf_ring_ptr = ha->rcvbuf_ring_bp; 1213 } else { 1214 ha->rcvbuf_ring_ptr++; 1215 } 1216 1217 ring_updated = TRUE; 1218 } 1219 QL_UB_UNLOCK(ha); 1220 } 1221 1222 if (ring_updated) { 1223 /* Sync queue. */ 1224 (void) ddi_dma_sync(ha->hba_buf.dma_handle, 1225 (off_t)RCVBUF_Q_BUFFER_OFFSET, (size_t)RCVBUF_QUEUE_SIZE, 1226 DDI_DMA_SYNC_FORDEV); 1227 1228 /* Set chip new ring index. */ 1229 WRT16_IO_REG(ha, mailbox[8], ha->rcvbuf_ring_index); 1230 } 1231 1232 /* Release adapter state lock. */ 1233 ADAPTER_STATE_UNLOCK(ha); 1234 1235 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1236 } 1237 1238 /* 1239 * ql_isp24xx_rcvbuf 1240 * Locates free buffers and send it to adapter. 1241 * 1242 * Input: 1243 * ha = adapter state pointer. 1244 * 1245 * Context: 1246 * Interrupt or Kernel context, no mailbox commands allowed. 1247 */ 1248 static void 1249 ql_isp24xx_rcvbuf(ql_adapter_state_t *ha) 1250 { 1251 rcvbuf_t *container; 1252 uint16_t index; 1253 ql_srb_t *sp; 1254 fc_unsol_buf_t *ubp; 1255 int rval; 1256 ip_buf_pool_entry_t *pkt = NULL; 1257 1258 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 1259 1260 for (;;) { 1261 /* Locate a buffer to give. */ 1262 QL_UB_LOCK(ha); 1263 for (index = 0; index < QL_UB_LIMIT; index++) { 1264 ubp = ha->ub_array[index]; 1265 if (ubp != NULL) { 1266 sp = ubp->ub_fca_private; 1267 if ((sp->ub_type == FC_TYPE_IS8802_SNAP) && 1268 (ha->flags & IP_INITIALIZED) && 1269 (sp->flags & SRB_UB_IN_FCA) && 1270 (!(sp->flags & (SRB_UB_IN_ISP | 1271 SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK | 1272 SRB_UB_ACQUIRED)))) { 1273 ha->ub_outcnt++; 1274 sp->flags |= SRB_UB_IN_ISP; 1275 break; 1276 } 1277 } 1278 } 1279 QL_UB_UNLOCK(ha); 1280 if (index == QL_UB_LIMIT) { 1281 break; 1282 } 1283 1284 /* Get IOCB packet for buffers. 
*/ 1285 if (pkt == NULL) { 1286 rval = ql_req_pkt(ha, (request_t **)&pkt); 1287 if (rval != QL_SUCCESS) { 1288 EL(ha, "failed, ql_req_pkt=%x\n", rval); 1289 QL_UB_LOCK(ha); 1290 ha->ub_outcnt--; 1291 sp->flags &= ~SRB_UB_IN_ISP; 1292 QL_UB_UNLOCK(ha); 1293 break; 1294 } 1295 pkt->entry_type = IP_BUF_POOL_TYPE; 1296 container = &pkt->buffers[0]; 1297 } 1298 1299 /* 1300 * Build container. 1301 */ 1302 ddi_put32(ha->hba_buf.acc_handle, &container->bufp[0], 1303 sp->ub_buffer.cookie.dmac_address); 1304 ddi_put32(ha->hba_buf.acc_handle, &container->bufp[1], 1305 sp->ub_buffer.cookie.dmac_notused); 1306 ddi_put16(ha->hba_buf.acc_handle, &container->handle, 1307 LSW(sp->handle)); 1308 1309 pkt->buffer_count++; 1310 container++; 1311 1312 if (pkt->buffer_count == IP_POOL_BUFFERS) { 1313 ql_isp_cmd(ha); 1314 pkt = NULL; 1315 } 1316 } 1317 1318 if (pkt != NULL) { 1319 ql_isp_cmd(ha); 1320 } 1321 1322 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1323 } 1324