/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* Global dump buffers and their allocation orders, guarded by
 * _dump_buf_lock.  NOTE(review): non-static, so presumably referenced
 * from other lpfc translation units - confirm before narrowing linkage.
 */
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Forward declarations for file-local helpers defined later in this file */
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct
lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
/* IDR used to hand out unique board numbers across HBA instances */
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	/* One-time byte-swap of the license key, shared by all instances */
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			/* Convert the 56-byte key to big-endian words once */
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		/* Clamp the copy so we never run past the VPD buffer */
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	/* Overlay the prog_id bit-field layout on the raw response word */
	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		/* Save the DMA buffer pointer before freeing the mailbox */
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		/* Encode each WWNN nibble as '0'-'9' or 'a'-'f' */
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri.
	 * NOTE(review): calls lpfc_sli4_get_els_iocb_cnt() here even though
	 * this is an SLI-3 path - confirm it is benign on non-SLI4 ports.
	 */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	/* Ring-0 interrupts stay off when the FCP ring is polled */
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
					KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	/* Thin wrapper: use the configured topology */
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	/* Reject a configured speed the adapter's link-mask (lmt) does not
	 * advertise; fall back to auto-negotiation.
	 */
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		/* MBX_BUSY with MBX_NOWAIT means the cmpl handler owns pmb */
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		/* Drop the lock while cancelling/aborting; completion
		 * handlers may need it.
		 */
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer.  It first runs the common SLI-3 teardown, then
 * recycles the SLI-4 aborted ELS sgls and aborted SCSI buffers.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsl_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Reset the aborted buffers and return them to the free list */
	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bring down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	/* Dispatch through the per-SLI-rev function pointer */
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
946 **/ 947 static void 948 lpfc_hb_timeout(unsigned long ptr) 949 { 950 struct lpfc_hba *phba; 951 uint32_t tmo_posted; 952 unsigned long iflag; 953 954 phba = (struct lpfc_hba *)ptr; 955 956 /* Check for heart beat timeout conditions */ 957 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 958 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 959 if (!tmo_posted) 960 phba->pport->work_port_events |= WORKER_HB_TMO; 961 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 962 963 /* Tell the worker thread there is work to do */ 964 if (!tmo_posted) 965 lpfc_worker_wake_up(phba); 966 return; 967 } 968 969 /** 970 * lpfc_rrq_timeout - The RRQ-timer timeout handler 971 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 972 * 973 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 974 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 975 * work-port-events bitmap and the worker thread is notified. This timeout 976 * event will be used by the worker thread to invoke the actual timeout 977 * handler routine, lpfc_rrq_handler. Any periodical operations will 978 * be performed in the timeout handler and the RRQ timeout event bit shall 979 * be cleared by the worker thread after it has taken the event bitmap out. 980 **/ 981 static void 982 lpfc_rrq_timeout(unsigned long ptr) 983 { 984 struct lpfc_hba *phba; 985 unsigned long iflag; 986 987 phba = (struct lpfc_hba *)ptr; 988 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 989 phba->hba_flag |= HBA_RRQ_ACTIVE; 990 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 991 lpfc_worker_wake_up(phba); 992 } 993 994 /** 995 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 996 * @phba: pointer to lpfc hba data structure. 997 * @pmboxq: pointer to the driver internal queue element for mailbox command. 998 * 999 * This is the callback function to the lpfc heart-beat mailbox command. 
1000 * If configured, the lpfc driver issues the heart-beat mailbox command to 1001 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1002 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1003 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1004 * heart-beat outstanding state. Once the mailbox command comes back and 1005 * no error conditions detected, the heart-beat mailbox command timer is 1006 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1007 * state is cleared for the next heart-beat. If the timer expired with the 1008 * heart-beat outstanding state set, the driver will put the HBA offline. 1009 **/ 1010 static void 1011 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1012 { 1013 unsigned long drvr_flag; 1014 1015 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1016 phba->hb_outstanding = 0; 1017 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1018 1019 /* Check and reset heart-beat timer is necessary */ 1020 mempool_free(pmboxq, phba->mbox_mem_pool); 1021 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1022 !(phba->link_state == LPFC_HBA_ERROR) && 1023 !(phba->pport->load_flag & FC_UNLOADING)) 1024 mod_timer(&phba->hb_tmofunc, 1025 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1026 return; 1027 } 1028 1029 /** 1030 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1031 * @phba: pointer to lpfc hba data structure. 1032 * 1033 * This is the actual HBA-timer timeout handler to be invoked by the worker 1034 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1035 * handler performs any periodic operations needed for the device. If such 1036 * periodic event has already been attended to either in the interrupt handler 1037 * or by processing slow-ring or fast-ring events within the HBA-timer 1038 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1039 * the timer for the next timeout period. 
 * If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	/* Run the E_D_TOV sequence check across all active vports. */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	/* If some completion arrived inside the last heart-beat interval the
	 * adapter is known alive: just re-arm the timer and return. A longer
	 * period is used while a heart-beat mailbox is still outstanding.
	 */
	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	/* If the posted ELS buffer count has not moved since the previous
	 * timeout, those buffers are idle: splice them off and free them.
	 */
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			/* Only issue the heart-beat mailbox when no other
			 * mailbox command is active or queued.
			 */
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					/* No memory: retry on the next
					 * normal interval.
					 */
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				/* Nothing completed since the last skipped
				 * heart-beat; report the stall.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				/* Mailbox subsystem busy: remember this
				 * heart-beat was skipped for stall detection.
				 */
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			* If heart beat timeout called with hb_outstanding set
			* we need to give the hb mailbox cmd a chance to
			* complete or TMO.
			*/
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Mark the SLI layer inactive before taking the port down. */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	/* Leave the HBA flagged in error state after the reset sequence. */
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			/* Register read failed: treat as an unplug. */
			phba->work_hs = UNPLUG_ERR ;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	/* Capture the latest adapter status words from SLIM. */
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

/* Post an FC_REG_BOARD_EVENT vendor event so a management application is
 * notified of the port-level internal error.
 */
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		/* Over-temperature condition: report it, latch the
		 * over-temp state and take the port offline.
		 */
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
{
	int rc;
	uint32_t intr_mode;

	/*
	 * On error status condition, driver need to wait for port
	 * ready before performing reset.
 */
	rc = lpfc_sli4_pdev_status_reg_wait(phba);
	if (!rc) {
		/* need reset: attempt for port recovery */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
		lpfc_offline_prep(phba, mbx_action);
		lpfc_offline(phba);
		/* release interrupt for possible resource change */
		lpfc_sli4_disable_intr(phba);
		lpfc_sli_brdrestart(phba);
		/* request and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3175 Failed to enable interrupt\n");
			return -EIO;
		} else {
			phba->intr_mode = intr_mode;
		}
		rc = lpfc_online(phba);
		if (rc == 0)
			lpfc_unblock_mgmt_io(phba);
	}
	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Error handling differs by SLI interface type. */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			break;
		}
		/* Decode the error-register pair into the port-down cause. */
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Restarted\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* Post a dump vendor event to the FC transport class. */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling
 * hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. It issues a READ_TOPOLOGY mailbox command to
 * retrieve the new topology; on any failure it falls through a goto
 * cleanup chain that releases resources in reverse order of acquisition
 * and re-enables link attention interrupts.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	/* rc doubles as an error code for the failure log at exit. */
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	/* Treat the failed link event as a link down. */
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	/* Walk the resource descriptors; each starts with a tag byte
	 * followed by a 16-bit little-endian length.
	 */
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			/* Not interested in this descriptor: skip it using
			 * its length field.
			 */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			/* Read-only VPD area holding the keyword fields
			 * (SN, V1..V4) parsed below.
			 */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			/* Clamp the declared length to the buffer. */
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					/* On SLI4 in GET state the physical
					 * port name comes from the firmware
					 * instead of VPD: skip the bytes.
					 */
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				/* Unknown keyword: skip over its payload. */
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			/* End tag terminates the walk. */
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars.
 * When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	/* If both strings were already derived, leave them untouched. */
	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	/* Pick the highest speed present in the link-mode type bits. */
	if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	/* Map the PCI device ID to model name, bus type and function. */
	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000",  "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator, Port %s",
				m.name, m.function,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
2103 **/ 2104 int 2105 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2106 { 2107 IOCB_t *icmd; 2108 struct lpfc_iocbq *iocb; 2109 struct lpfc_dmabuf *mp1, *mp2; 2110 2111 cnt += pring->missbufcnt; 2112 2113 /* While there are buffers to post */ 2114 while (cnt > 0) { 2115 /* Allocate buffer for command iocb */ 2116 iocb = lpfc_sli_get_iocbq(phba); 2117 if (iocb == NULL) { 2118 pring->missbufcnt = cnt; 2119 return cnt; 2120 } 2121 icmd = &iocb->iocb; 2122 2123 /* 2 buffers can be posted per command */ 2124 /* Allocate buffer to post */ 2125 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2126 if (mp1) 2127 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2128 if (!mp1 || !mp1->virt) { 2129 kfree(mp1); 2130 lpfc_sli_release_iocbq(phba, iocb); 2131 pring->missbufcnt = cnt; 2132 return cnt; 2133 } 2134 2135 INIT_LIST_HEAD(&mp1->list); 2136 /* Allocate buffer to post */ 2137 if (cnt > 1) { 2138 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2139 if (mp2) 2140 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2141 &mp2->phys); 2142 if (!mp2 || !mp2->virt) { 2143 kfree(mp2); 2144 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2145 kfree(mp1); 2146 lpfc_sli_release_iocbq(phba, iocb); 2147 pring->missbufcnt = cnt; 2148 return cnt; 2149 } 2150 2151 INIT_LIST_HEAD(&mp2->list); 2152 } else { 2153 mp2 = NULL; 2154 } 2155 2156 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2157 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2158 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2159 icmd->ulpBdeCount = 1; 2160 cnt--; 2161 if (mp2) { 2162 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2163 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2164 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2165 cnt--; 2166 icmd->ulpBdeCount = 2; 2167 } 2168 2169 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2170 icmd->ulpLe = 1; 2171 2172 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2173 IOCB_ERROR) { 2174 
lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2175 kfree(mp1); 2176 cnt++; 2177 if (mp2) { 2178 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2179 kfree(mp2); 2180 cnt++; 2181 } 2182 lpfc_sli_release_iocbq(phba, iocb); 2183 pring->missbufcnt = cnt; 2184 return cnt; 2185 } 2186 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2187 if (mp2) 2188 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2189 } 2190 pring->missbufcnt = 0; 2191 return 0; 2192 } 2193 2194 /** 2195 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2196 * @phba: pointer to lpfc hba data structure. 2197 * 2198 * This routine posts initial receive IOCB buffers to the ELS ring. The 2199 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2200 * set to 64 IOCBs. 2201 * 2202 * Return codes 2203 * 0 - success (currently always success) 2204 **/ 2205 static int 2206 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2207 { 2208 struct lpfc_sli *psli = &phba->sli; 2209 2210 /* Ring 0, ELS / CT buffers */ 2211 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2212 /* Ring 2 - FCP no buffers needed */ 2213 2214 return 0; 2215 } 2216 2217 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2218 2219 /** 2220 * lpfc_sha_init - Set up initial array of hash table entries 2221 * @HashResultPointer: pointer to an array as hash table. 2222 * 2223 * This routine sets up the initial values to the array of hash table entries 2224 * for the LC HBAs. 2225 **/ 2226 static void 2227 lpfc_sha_init(uint32_t * HashResultPointer) 2228 { 2229 HashResultPointer[0] = 0x67452301; 2230 HashResultPointer[1] = 0xEFCDAB89; 2231 HashResultPointer[2] = 0x98BADCFE; 2232 HashResultPointer[3] = 0x10325476; 2233 HashResultPointer[4] = 0xC3D2E1F0; 2234 } 2235 2236 /** 2237 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2238 * @HashResultPointer: pointer to an initial/result hash table. 2239 * @HashWorkingPointer: pointer to an working hash table. 
2240 * 2241 * This routine iterates an initial hash table pointed by @HashResultPointer 2242 * with the values from the working hash table pointeed by @HashWorkingPointer. 2243 * The results are putting back to the initial hash table, returned through 2244 * the @HashResultPointer as the result hash table. 2245 **/ 2246 static void 2247 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2248 { 2249 int t; 2250 uint32_t TEMP; 2251 uint32_t A, B, C, D, E; 2252 t = 16; 2253 do { 2254 HashWorkingPointer[t] = 2255 S(1, 2256 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2257 8] ^ 2258 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2259 } while (++t <= 79); 2260 t = 0; 2261 A = HashResultPointer[0]; 2262 B = HashResultPointer[1]; 2263 C = HashResultPointer[2]; 2264 D = HashResultPointer[3]; 2265 E = HashResultPointer[4]; 2266 2267 do { 2268 if (t < 20) { 2269 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2270 } else if (t < 40) { 2271 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2272 } else if (t < 60) { 2273 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2274 } else { 2275 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2276 } 2277 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2278 E = D; 2279 D = C; 2280 C = S(30, B); 2281 B = A; 2282 A = TEMP; 2283 } while (++t <= 79); 2284 2285 HashResultPointer[0] += A; 2286 HashResultPointer[1] += B; 2287 HashResultPointer[2] += C; 2288 HashResultPointer[3] += D; 2289 HashResultPointer[4] += E; 2290 2291 } 2292 2293 /** 2294 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2295 * @RandomChallenge: pointer to the entry of host challenge random number array. 2296 * @HashWorking: pointer to the entry of the working hash array. 2297 * 2298 * This routine calculates the working hash array referred by @HashWorking 2299 * from the challenge random numbers associated with the host, referred by 2300 * @RandomChallenge. 
The result is put into the entry of the working hash 2301 * array and returned by reference through @HashWorking. 2302 **/ 2303 static void 2304 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2305 { 2306 *HashWorking = (*RandomChallenge ^ *HashWorking); 2307 } 2308 2309 /** 2310 * lpfc_hba_init - Perform special handling for LC HBA initialization 2311 * @phba: pointer to lpfc hba data structure. 2312 * @hbainit: pointer to an array of unsigned 32-bit integers. 2313 * 2314 * This routine performs the special handling for LC HBA initialization. 2315 **/ 2316 void 2317 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2318 { 2319 int t; 2320 uint32_t *HashWorking; 2321 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2322 2323 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2324 if (!HashWorking) 2325 return; 2326 2327 HashWorking[0] = HashWorking[78] = *pwwnn++; 2328 HashWorking[1] = HashWorking[79] = *pwwnn; 2329 2330 for (t = 0; t < 7; t++) 2331 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2332 2333 lpfc_sha_init(hbainit); 2334 lpfc_sha_iterate(hbainit, HashWorking); 2335 kfree(HashWorking); 2336 } 2337 2338 /** 2339 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2340 * @vport: pointer to a virtual N_Port data structure. 2341 * 2342 * This routine performs the necessary cleanups before deleting the @vport. 2343 * It invokes the discovery state machine to perform necessary state 2344 * transitions and to release the ndlps associated with the @vport. Note, 2345 * the physical port is treated as @vport 0. 
2346 **/ 2347 void 2348 lpfc_cleanup(struct lpfc_vport *vport) 2349 { 2350 struct lpfc_hba *phba = vport->phba; 2351 struct lpfc_nodelist *ndlp, *next_ndlp; 2352 int i = 0; 2353 2354 if (phba->link_state > LPFC_LINK_DOWN) 2355 lpfc_port_link_failure(vport); 2356 2357 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2358 if (!NLP_CHK_NODE_ACT(ndlp)) { 2359 ndlp = lpfc_enable_node(vport, ndlp, 2360 NLP_STE_UNUSED_NODE); 2361 if (!ndlp) 2362 continue; 2363 spin_lock_irq(&phba->ndlp_lock); 2364 NLP_SET_FREE_REQ(ndlp); 2365 spin_unlock_irq(&phba->ndlp_lock); 2366 /* Trigger the release of the ndlp memory */ 2367 lpfc_nlp_put(ndlp); 2368 continue; 2369 } 2370 spin_lock_irq(&phba->ndlp_lock); 2371 if (NLP_CHK_FREE_REQ(ndlp)) { 2372 /* The ndlp should not be in memory free mode already */ 2373 spin_unlock_irq(&phba->ndlp_lock); 2374 continue; 2375 } else 2376 /* Indicate request for freeing ndlp memory */ 2377 NLP_SET_FREE_REQ(ndlp); 2378 spin_unlock_irq(&phba->ndlp_lock); 2379 2380 if (vport->port_type != LPFC_PHYSICAL_PORT && 2381 ndlp->nlp_DID == Fabric_DID) { 2382 /* Just free up ndlp with Fabric_DID for vports */ 2383 lpfc_nlp_put(ndlp); 2384 continue; 2385 } 2386 2387 /* take care of nodes in unused state before the state 2388 * machine taking action. 2389 */ 2390 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2391 lpfc_nlp_put(ndlp); 2392 continue; 2393 } 2394 2395 if (ndlp->nlp_type & NLP_FABRIC) 2396 lpfc_disc_state_machine(vport, ndlp, NULL, 2397 NLP_EVT_DEVICE_RECOVERY); 2398 2399 lpfc_disc_state_machine(vport, ndlp, NULL, 2400 NLP_EVT_DEVICE_RM); 2401 } 2402 2403 /* At this point, ALL ndlp's should be gone 2404 * because of the previous NLP_EVT_DEVICE_RM. 2405 * Lets wait for this to happen, if needed. 
2406 */ 2407 while (!list_empty(&vport->fc_nodes)) { 2408 if (i++ > 3000) { 2409 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2410 "0233 Nodelist not empty\n"); 2411 list_for_each_entry_safe(ndlp, next_ndlp, 2412 &vport->fc_nodes, nlp_listp) { 2413 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2414 LOG_NODE, 2415 "0282 did:x%x ndlp:x%p " 2416 "usgmap:x%x refcnt:%d\n", 2417 ndlp->nlp_DID, (void *)ndlp, 2418 ndlp->nlp_usg_map, 2419 atomic_read( 2420 &ndlp->kref.refcount)); 2421 } 2422 break; 2423 } 2424 2425 /* Wait for any activity on ndlps to settle */ 2426 msleep(10); 2427 } 2428 lpfc_cleanup_vports_rrqs(vport, NULL); 2429 } 2430 2431 /** 2432 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2433 * @vport: pointer to a virtual N_Port data structure. 2434 * 2435 * This routine stops all the timers associated with a @vport. This function 2436 * is invoked before disabling or deleting a @vport. Note that the physical 2437 * port is treated as @vport 0. 2438 **/ 2439 void 2440 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2441 { 2442 del_timer_sync(&vport->els_tmofunc); 2443 del_timer_sync(&vport->fc_fdmitmo); 2444 del_timer_sync(&vport->delayed_disc_tmo); 2445 lpfc_can_disctmo(vport); 2446 return; 2447 } 2448 2449 /** 2450 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2451 * @phba: pointer to lpfc hba data structure. 2452 * 2453 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2454 * caller of this routine should already hold the host lock. 2455 **/ 2456 void 2457 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2458 { 2459 /* Clear pending FCF rediscovery wait flag */ 2460 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2461 2462 /* Now, try to stop the timer */ 2463 del_timer(&phba->fcf.redisc_wait); 2464 } 2465 2466 /** 2467 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2468 * @phba: pointer to lpfc hba data structure. 
2469 * 2470 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2471 * checks whether the FCF rediscovery wait timer is pending with the host 2472 * lock held before proceeding with disabling the timer and clearing the 2473 * wait timer pendig flag. 2474 **/ 2475 void 2476 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2477 { 2478 spin_lock_irq(&phba->hbalock); 2479 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2480 /* FCF rediscovery timer already fired or stopped */ 2481 spin_unlock_irq(&phba->hbalock); 2482 return; 2483 } 2484 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2485 /* Clear failover in progress flags */ 2486 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2487 spin_unlock_irq(&phba->hbalock); 2488 } 2489 2490 /** 2491 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2492 * @phba: pointer to lpfc hba data structure. 2493 * 2494 * This routine stops all the timers associated with a HBA. This function is 2495 * invoked before either putting a HBA offline or unloading the driver. 
2496 **/ 2497 void 2498 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2499 { 2500 lpfc_stop_vport_timers(phba->pport); 2501 del_timer_sync(&phba->sli.mbox_tmo); 2502 del_timer_sync(&phba->fabric_block_timer); 2503 del_timer_sync(&phba->eratt_poll); 2504 del_timer_sync(&phba->hb_tmofunc); 2505 if (phba->sli_rev == LPFC_SLI_REV4) { 2506 del_timer_sync(&phba->rrq_tmr); 2507 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2508 } 2509 phba->hb_outstanding = 0; 2510 2511 switch (phba->pci_dev_grp) { 2512 case LPFC_PCI_DEV_LP: 2513 /* Stop any LightPulse device specific driver timers */ 2514 del_timer_sync(&phba->fcp_poll_timer); 2515 break; 2516 case LPFC_PCI_DEV_OC: 2517 /* Stop any OneConnect device sepcific driver timers */ 2518 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2519 break; 2520 default: 2521 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2522 "0297 Invalid device group (x%x)\n", 2523 phba->pci_dev_grp); 2524 break; 2525 } 2526 return; 2527 } 2528 2529 /** 2530 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2531 * @phba: pointer to lpfc hba data structure. 2532 * 2533 * This routine marks a HBA's management interface as blocked. Once the HBA's 2534 * management interface is marked as blocked, all the user space access to 2535 * the HBA, whether they are from sysfs interface or libdfc interface will 2536 * all be blocked. The HBA is set to block the management interface when the 2537 * driver prepares the HBA interface for online or offline. 
2538 **/ 2539 static void 2540 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2541 { 2542 unsigned long iflag; 2543 uint8_t actcmd = MBX_HEARTBEAT; 2544 unsigned long timeout; 2545 2546 spin_lock_irqsave(&phba->hbalock, iflag); 2547 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2548 spin_unlock_irqrestore(&phba->hbalock, iflag); 2549 if (mbx_action == LPFC_MBX_NO_WAIT) 2550 return; 2551 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2552 spin_lock_irqsave(&phba->hbalock, iflag); 2553 if (phba->sli.mbox_active) { 2554 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2555 /* Determine how long we might wait for the active mailbox 2556 * command to be gracefully completed by firmware. 2557 */ 2558 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2559 phba->sli.mbox_active) * 1000) + jiffies; 2560 } 2561 spin_unlock_irqrestore(&phba->hbalock, iflag); 2562 2563 /* Wait for the outstnading mailbox command to complete */ 2564 while (phba->sli.mbox_active) { 2565 /* Check active mailbox complete status every 2ms */ 2566 msleep(2); 2567 if (time_after(jiffies, timeout)) { 2568 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2569 "2813 Mgmt IO is Blocked %x " 2570 "- mbox cmd %x still active\n", 2571 phba->sli.sli_flag, actcmd); 2572 break; 2573 } 2574 } 2575 } 2576 2577 /** 2578 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 2579 * @phba: pointer to lpfc hba data structure. 2580 * 2581 * Allocate RPIs for all active remote nodes. This is needed whenever 2582 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 2583 * is to fixup the temporary rpi assignments. 
2584 **/ 2585 void 2586 lpfc_sli4_node_prep(struct lpfc_hba *phba) 2587 { 2588 struct lpfc_nodelist *ndlp, *next_ndlp; 2589 struct lpfc_vport **vports; 2590 int i; 2591 2592 if (phba->sli_rev != LPFC_SLI_REV4) 2593 return; 2594 2595 vports = lpfc_create_vport_work_array(phba); 2596 if (vports != NULL) { 2597 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2598 if (vports[i]->load_flag & FC_UNLOADING) 2599 continue; 2600 2601 list_for_each_entry_safe(ndlp, next_ndlp, 2602 &vports[i]->fc_nodes, 2603 nlp_listp) { 2604 if (NLP_CHK_NODE_ACT(ndlp)) 2605 ndlp->nlp_rpi = 2606 lpfc_sli4_alloc_rpi(phba); 2607 } 2608 } 2609 } 2610 lpfc_destroy_vport_work_array(phba, vports); 2611 } 2612 2613 /** 2614 * lpfc_online - Initialize and bring a HBA online 2615 * @phba: pointer to lpfc hba data structure. 2616 * 2617 * This routine initializes the HBA and brings a HBA online. During this 2618 * process, the management interface is blocked to prevent user space access 2619 * to the HBA interfering with the driver initialization. 
2620 * 2621 * Return codes 2622 * 0 - successful 2623 * 1 - failed 2624 **/ 2625 int 2626 lpfc_online(struct lpfc_hba *phba) 2627 { 2628 struct lpfc_vport *vport; 2629 struct lpfc_vport **vports; 2630 int i; 2631 2632 if (!phba) 2633 return 0; 2634 vport = phba->pport; 2635 2636 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2637 return 0; 2638 2639 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2640 "0458 Bring Adapter online\n"); 2641 2642 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 2643 2644 if (!lpfc_sli_queue_setup(phba)) { 2645 lpfc_unblock_mgmt_io(phba); 2646 return 1; 2647 } 2648 2649 if (phba->sli_rev == LPFC_SLI_REV4) { 2650 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2651 lpfc_unblock_mgmt_io(phba); 2652 return 1; 2653 } 2654 } else { 2655 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2656 lpfc_unblock_mgmt_io(phba); 2657 return 1; 2658 } 2659 } 2660 2661 vports = lpfc_create_vport_work_array(phba); 2662 if (vports != NULL) 2663 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2664 struct Scsi_Host *shost; 2665 shost = lpfc_shost_from_vport(vports[i]); 2666 spin_lock_irq(shost->host_lock); 2667 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2668 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2669 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2670 if (phba->sli_rev == LPFC_SLI_REV4) 2671 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2672 spin_unlock_irq(shost->host_lock); 2673 } 2674 lpfc_destroy_vport_work_array(phba, vports); 2675 2676 lpfc_unblock_mgmt_io(phba); 2677 return 0; 2678 } 2679 2680 /** 2681 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2682 * @phba: pointer to lpfc hba data structure. 2683 * 2684 * This routine marks a HBA's management interface as not blocked. Once the 2685 * HBA's management interface is marked as not blocked, all the user space 2686 * access to the HBA, whether they are from sysfs interface or libdfc 2687 * interface will be allowed. 
The HBA is set to block the management interface 2688 * when the driver prepares the HBA interface for online or offline and then 2689 * set to unblock the management interface afterwards. 2690 **/ 2691 void 2692 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2693 { 2694 unsigned long iflag; 2695 2696 spin_lock_irqsave(&phba->hbalock, iflag); 2697 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2698 spin_unlock_irqrestore(&phba->hbalock, iflag); 2699 } 2700 2701 /** 2702 * lpfc_offline_prep - Prepare a HBA to be brought offline 2703 * @phba: pointer to lpfc hba data structure. 2704 * 2705 * This routine is invoked to prepare a HBA to be brought offline. It performs 2706 * unregistration login to all the nodes on all vports and flushes the mailbox 2707 * queue to make it ready to be brought offline. 2708 **/ 2709 void 2710 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 2711 { 2712 struct lpfc_vport *vport = phba->pport; 2713 struct lpfc_nodelist *ndlp, *next_ndlp; 2714 struct lpfc_vport **vports; 2715 struct Scsi_Host *shost; 2716 int i; 2717 2718 if (vport->fc_flag & FC_OFFLINE_MODE) 2719 return; 2720 2721 lpfc_block_mgmt_io(phba, mbx_action); 2722 2723 lpfc_linkdown(phba); 2724 2725 /* Issue an unreg_login to all nodes on all vports */ 2726 vports = lpfc_create_vport_work_array(phba); 2727 if (vports != NULL) { 2728 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2729 if (vports[i]->load_flag & FC_UNLOADING) 2730 continue; 2731 shost = lpfc_shost_from_vport(vports[i]); 2732 spin_lock_irq(shost->host_lock); 2733 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2734 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2735 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2736 spin_unlock_irq(shost->host_lock); 2737 2738 shost = lpfc_shost_from_vport(vports[i]); 2739 list_for_each_entry_safe(ndlp, next_ndlp, 2740 &vports[i]->fc_nodes, 2741 nlp_listp) { 2742 if (!NLP_CHK_NODE_ACT(ndlp)) 2743 continue; 2744 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2745 continue; 
2746 if (ndlp->nlp_type & NLP_FABRIC) { 2747 lpfc_disc_state_machine(vports[i], ndlp, 2748 NULL, NLP_EVT_DEVICE_RECOVERY); 2749 lpfc_disc_state_machine(vports[i], ndlp, 2750 NULL, NLP_EVT_DEVICE_RM); 2751 } 2752 spin_lock_irq(shost->host_lock); 2753 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2754 spin_unlock_irq(shost->host_lock); 2755 /* 2756 * Whenever an SLI4 port goes offline, free the 2757 * RPI. Get a new RPI when the adapter port 2758 * comes back online. 2759 */ 2760 if (phba->sli_rev == LPFC_SLI_REV4) 2761 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 2762 lpfc_unreg_rpi(vports[i], ndlp); 2763 } 2764 } 2765 } 2766 lpfc_destroy_vport_work_array(phba, vports); 2767 2768 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 2769 } 2770 2771 /** 2772 * lpfc_offline - Bring a HBA offline 2773 * @phba: pointer to lpfc hba data structure. 2774 * 2775 * This routine actually brings a HBA offline. It stops all the timers 2776 * associated with the HBA, brings down the SLI layer, and eventually 2777 * marks the HBA as in offline state for the upper layer protocol. 2778 **/ 2779 void 2780 lpfc_offline(struct lpfc_hba *phba) 2781 { 2782 struct Scsi_Host *shost; 2783 struct lpfc_vport **vports; 2784 int i; 2785 2786 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2787 return; 2788 2789 /* stop port and all timers associated with this hba */ 2790 lpfc_stop_port(phba); 2791 vports = lpfc_create_vport_work_array(phba); 2792 if (vports != NULL) 2793 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2794 lpfc_stop_vport_timers(vports[i]); 2795 lpfc_destroy_vport_work_array(phba, vports); 2796 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2797 "0460 Bring Adapter offline\n"); 2798 /* Bring down the SLI Layer and cleanup. The HBA is offline 2799 now. 
*/ 2800 lpfc_sli_hba_down(phba); 2801 spin_lock_irq(&phba->hbalock); 2802 phba->work_ha = 0; 2803 spin_unlock_irq(&phba->hbalock); 2804 vports = lpfc_create_vport_work_array(phba); 2805 if (vports != NULL) 2806 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2807 shost = lpfc_shost_from_vport(vports[i]); 2808 spin_lock_irq(shost->host_lock); 2809 vports[i]->work_port_events = 0; 2810 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2811 spin_unlock_irq(shost->host_lock); 2812 } 2813 lpfc_destroy_vport_work_array(phba, vports); 2814 } 2815 2816 /** 2817 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2818 * @phba: pointer to lpfc hba data structure. 2819 * 2820 * This routine is to free all the SCSI buffers and IOCBs from the driver 2821 * list back to kernel. It is called from lpfc_pci_remove_one to free 2822 * the internal resources before the device is removed from the system. 2823 **/ 2824 static void 2825 lpfc_scsi_free(struct lpfc_hba *phba) 2826 { 2827 struct lpfc_scsi_buf *sb, *sb_next; 2828 struct lpfc_iocbq *io, *io_next; 2829 2830 spin_lock_irq(&phba->hbalock); 2831 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2832 spin_lock(&phba->scsi_buf_list_lock); 2833 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2834 list_del(&sb->list); 2835 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2836 sb->dma_handle); 2837 kfree(sb); 2838 phba->total_scsi_bufs--; 2839 } 2840 spin_unlock(&phba->scsi_buf_list_lock); 2841 2842 /* Release all the lpfc_iocbq entries maintained by this host. */ 2843 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2844 list_del(&io->list); 2845 kfree(io); 2846 phba->total_iocbq_bufs--; 2847 } 2848 2849 spin_unlock_irq(&phba->hbalock); 2850 } 2851 2852 /** 2853 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping 2854 * @phba: pointer to lpfc hba data structure. 
2855 * 2856 * This routine first calculates the sizes of the current els and allocated 2857 * scsi sgl lists, and then goes through all sgls to updates the physical 2858 * XRIs assigned due to port function reset. During port initialization, the 2859 * current els and allocated scsi sgl lists are 0s. 2860 * 2861 * Return codes 2862 * 0 - successful (for now, it always returns 0) 2863 **/ 2864 int 2865 lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) 2866 { 2867 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 2868 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL; 2869 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt; 2870 LIST_HEAD(els_sgl_list); 2871 LIST_HEAD(scsi_sgl_list); 2872 int rc; 2873 2874 /* 2875 * update on pci function's els xri-sgl list 2876 */ 2877 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 2878 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 2879 /* els xri-sgl expanded */ 2880 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 2881 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2882 "3157 ELS xri-sgl count increased from " 2883 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 2884 els_xri_cnt); 2885 /* allocate the additional els sgls */ 2886 for (i = 0; i < xri_cnt; i++) { 2887 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 2888 GFP_KERNEL); 2889 if (sglq_entry == NULL) { 2890 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2891 "2562 Failure to allocate an " 2892 "ELS sgl entry:%d\n", i); 2893 rc = -ENOMEM; 2894 goto out_free_mem; 2895 } 2896 sglq_entry->buff_type = GEN_BUFF_TYPE; 2897 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 2898 &sglq_entry->phys); 2899 if (sglq_entry->virt == NULL) { 2900 kfree(sglq_entry); 2901 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2902 "2563 Failure to allocate an " 2903 "ELS mbuf:%d\n", i); 2904 rc = -ENOMEM; 2905 goto out_free_mem; 2906 } 2907 sglq_entry->sgl = sglq_entry->virt; 2908 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 2909 sglq_entry->state = SGL_FREED; 2910 list_add_tail(&sglq_entry->list, 
&els_sgl_list); 2911 } 2912 spin_lock(&phba->hbalock); 2913 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); 2914 spin_unlock(&phba->hbalock); 2915 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 2916 /* els xri-sgl shrinked */ 2917 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 2918 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2919 "3158 ELS xri-sgl count decreased from " 2920 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 2921 els_xri_cnt); 2922 spin_lock_irq(&phba->hbalock); 2923 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list); 2924 spin_unlock_irq(&phba->hbalock); 2925 /* release extra els sgls from list */ 2926 for (i = 0; i < xri_cnt; i++) { 2927 list_remove_head(&els_sgl_list, 2928 sglq_entry, struct lpfc_sglq, list); 2929 if (sglq_entry) { 2930 lpfc_mbuf_free(phba, sglq_entry->virt, 2931 sglq_entry->phys); 2932 kfree(sglq_entry); 2933 } 2934 } 2935 spin_lock_irq(&phba->hbalock); 2936 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); 2937 spin_unlock_irq(&phba->hbalock); 2938 } else 2939 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2940 "3163 ELS xri-sgl count unchanged: %d\n", 2941 els_xri_cnt); 2942 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 2943 2944 /* update xris to els sgls on the list */ 2945 sglq_entry = NULL; 2946 sglq_entry_next = NULL; 2947 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 2948 &phba->sli4_hba.lpfc_sgl_list, list) { 2949 lxri = lpfc_sli4_next_xritag(phba); 2950 if (lxri == NO_XRI) { 2951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2952 "2400 Failed to allocate xri for " 2953 "ELS sgl\n"); 2954 rc = -ENOMEM; 2955 goto out_free_mem; 2956 } 2957 sglq_entry->sli4_lxritag = lxri; 2958 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 2959 } 2960 2961 /* 2962 * update on pci function's allocated scsi xri-sgl list 2963 */ 2964 phba->total_scsi_bufs = 0; 2965 2966 /* maximum number of xris available for scsi buffers */ 2967 phba->sli4_hba.scsi_xri_max = 
		phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2401 Current allocated SCSI xri-sgl count:%d, "
			"maximum SCSI xri count:%d\n",
			phba->sli4_hba.scsi_xri_cnt,
			phba->sli4_hba.scsi_xri_max);

	/* Detach every currently allocated SCSI buffer onto a local list */
	spin_lock_irq(&phba->scsi_buf_list_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
	spin_unlock_irq(&phba->scsi_buf_list_lock);

	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
		/* max scsi xri shrinked below the allocated scsi buffers */
		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
					phba->sli4_hba.scsi_xri_max;
		/* release the extra allocated scsi buffers */
		for (i = 0; i < scsi_xri_cnt; i++) {
			list_remove_head(&scsi_sgl_list, psb,
					 struct lpfc_scsi_buf, list);
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
				      psb->dma_handle);
			kfree(psb);
		}
		spin_lock_irq(&phba->scsi_buf_list_lock);
		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
		spin_unlock_irq(&phba->scsi_buf_list_lock);
	}

	/* update xris associated to remaining allocated scsi buffers */
	psb = NULL;
	psb_next = NULL;
	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2560 Failed to allocate xri for "
					"scsi buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	/*
	 * NOTE(review): plain spin_lock() here, while the earlier
	 * acquisitions of the same lock use spin_lock_irq() -- confirm this
	 * path can never contend with an interrupt-context user of
	 * scsi_buf_list_lock.
	 */
	spin_lock(&phba->scsi_buf_list_lock);
	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
	spin_unlock(&phba->scsi_buf_list_lock);

	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	lpfc_scsi_free(phba);
	return rc;
}

/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates a FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *      @vport - pointer to the virtual N_Port data structure.
 *      NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	int error = 0;

	/*
	 * NPIV vports (any dev other than the PCI function's own device) use
	 * the vport host template; the physical port uses the full template.
	 */
	if (dev != &phba->pcidev->dev)
		shost = scsi_host_alloc(&lpfc_vport_template,
					sizeof(struct lpfc_vport));
	else
		shost = scsi_host_alloc(&lpfc_template,
					sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	/* The vport structure lives in the shost's hostdata area */
	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;

	lpfc_get_vport_cfgparam(vport);
	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* DMA boundary derived from the supported SGE length */
		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	/* Per-vport timers; each handler receives the vport as context */
	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;

	init_timer(&vport->fc_fdmitmo);
	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
	vport->fc_fdmitmo.data = (unsigned long)vport;

	init_timer(&vport->els_tmofunc);
	vport->els_tmofunc.function = lpfc_els_timeout;
	vport->els_tmofunc.data = (unsigned long)vport;

	init_timer(&vport->delayed_disc_tmo);
	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
	vport->delayed_disc_tmo.data = (unsigned long)vport;

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}

/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	/* Detach from the FC transport and the SCSI midlayer first */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Unlink this vport from the HBA's port list */
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}

/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	/* Unloading: report the scan finished so the midlayer moves on */
	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	/* Hard cap: give up waiting after 30 seconds of scanning */
	if (time >= 30 * HZ) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	/* Link never came up: stop waiting after 15 seconds */
	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	/* Keep scanning (stat stays 0) while discovery is still active */
	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	/* Give an empty map a 2-second grace period before declaring done */
	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	/* Advertise FCP (bits for FC-4 type 0x08) in the supported-FC4s map */
	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	/* Translate the link-module type mask into FC transport speeds */
	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	/* Max frame size comes from the service parameters' BB receive size */
	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}

/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);
}

/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine is invoked when waiting for FCF table rediscover has been
 * timed out. If new FCF record(s) has (have) been discovered during the
 * wait period, a new FCF event shall be added to the FCOE async event
 * list, and then worker thread shall be waked up for processing from the
 * worker thread context.
 **/
void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
 *
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		/* All known fault codes map to mailbox success */
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

/**
 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link speed and translate
 * it into the base driver's link-attention link speed coding.
 *
 * Return: Link-attention link speed in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
				struct lpfc_acqe_link *acqe_link)
{
	uint8_t link_speed;

	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
	case LPFC_ASYNC_LINK_SPEED_ZERO:
	case LPFC_ASYNC_LINK_SPEED_10MBPS:
	case LPFC_ASYNC_LINK_SPEED_100MBPS:
		/* Sub-gigabit speeds have no FC equivalent */
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	case LPFC_ASYNC_LINK_SPEED_1GBPS:
		link_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_ASYNC_LINK_SPEED_10GBPS:
		link_speed = LPFC_LINK_SPEED_10GHZ;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0483 Invalid link-attention link speed: x%x\n",
				bf_get(lpfc_acqe_link_speed, acqe_link));
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	}
	return link_speed;
}

/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
3510 **/ 3511 static void 3512 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3513 struct lpfc_acqe_link *acqe_link) 3514 { 3515 struct lpfc_dmabuf *mp; 3516 LPFC_MBOXQ_t *pmb; 3517 MAILBOX_t *mb; 3518 struct lpfc_mbx_read_top *la; 3519 uint8_t att_type; 3520 int rc; 3521 3522 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3523 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 3524 return; 3525 phba->fcoe_eventtag = acqe_link->event_tag; 3526 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3527 if (!pmb) { 3528 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3529 "0395 The mboxq allocation failed\n"); 3530 return; 3531 } 3532 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3533 if (!mp) { 3534 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3535 "0396 The lpfc_dmabuf allocation failed\n"); 3536 goto out_free_pmb; 3537 } 3538 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3539 if (!mp->virt) { 3540 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3541 "0397 The mbuf allocation failed\n"); 3542 goto out_free_dmabuf; 3543 } 3544 3545 /* Cleanup any outstanding ELS commands */ 3546 lpfc_els_flush_all_cmd(phba); 3547 3548 /* Block ELS IOCBs until we have done process link event */ 3549 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3550 3551 /* Update link event statistics */ 3552 phba->sli.slistat.link_event++; 3553 3554 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3555 lpfc_read_topology(phba, pmb, mp); 3556 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3557 pmb->vport = phba->pport; 3558 3559 /* Keep the link status for extra SLI4 state machine reference */ 3560 phba->sli4_hba.link_state.speed = 3561 bf_get(lpfc_acqe_link_speed, acqe_link); 3562 phba->sli4_hba.link_state.duplex = 3563 bf_get(lpfc_acqe_link_duplex, acqe_link); 3564 phba->sli4_hba.link_state.status = 3565 bf_get(lpfc_acqe_link_status, acqe_link); 3566 phba->sli4_hba.link_state.type = 3567 bf_get(lpfc_acqe_link_type, acqe_link); 3568 
phba->sli4_hba.link_state.number = 3569 bf_get(lpfc_acqe_link_number, acqe_link); 3570 phba->sli4_hba.link_state.fault = 3571 bf_get(lpfc_acqe_link_fault, acqe_link); 3572 phba->sli4_hba.link_state.logical_speed = 3573 bf_get(lpfc_acqe_logical_link_speed, acqe_link); 3574 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3575 "2900 Async FC/FCoE Link event - Speed:%dGBit " 3576 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 3577 "Logical speed:%dMbps Fault:%d\n", 3578 phba->sli4_hba.link_state.speed, 3579 phba->sli4_hba.link_state.topology, 3580 phba->sli4_hba.link_state.status, 3581 phba->sli4_hba.link_state.type, 3582 phba->sli4_hba.link_state.number, 3583 phba->sli4_hba.link_state.logical_speed * 10, 3584 phba->sli4_hba.link_state.fault); 3585 /* 3586 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 3587 * topology info. Note: Optional for non FC-AL ports. 3588 */ 3589 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3590 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3591 if (rc == MBX_NOT_FINISHED) 3592 goto out_free_dmabuf; 3593 return; 3594 } 3595 /* 3596 * For FCoE Mode: fill in all the topology information we need and call 3597 * the READ_TOPOLOGY completion routine to continue without actually 3598 * sending the READ_TOPOLOGY mailbox command to the port. 
3599 */ 3600 /* Parse and translate status field */ 3601 mb = &pmb->u.mb; 3602 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 3603 3604 /* Parse and translate link attention fields */ 3605 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 3606 la->eventTag = acqe_link->event_tag; 3607 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 3608 bf_set(lpfc_mbx_read_top_link_spd, la, 3609 lpfc_sli4_parse_latt_link_speed(phba, acqe_link)); 3610 3611 /* Fake the the following irrelvant fields */ 3612 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 3613 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 3614 bf_set(lpfc_mbx_read_top_il, la, 0); 3615 bf_set(lpfc_mbx_read_top_pb, la, 0); 3616 bf_set(lpfc_mbx_read_top_fa, la, 0); 3617 bf_set(lpfc_mbx_read_top_mm, la, 0); 3618 3619 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3620 lpfc_mbx_cmpl_read_topology(phba, pmb); 3621 3622 return; 3623 3624 out_free_dmabuf: 3625 kfree(mp); 3626 out_free_pmb: 3627 mempool_free(pmb, phba->mbox_mem_pool); 3628 } 3629 3630 /** 3631 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 3632 * @phba: pointer to lpfc hba data structure. 3633 * @acqe_fc: pointer to the async fc completion queue entry. 3634 * 3635 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 3636 * that the event was received and then issue a read_topology mailbox command so 3637 * that the rest of the driver will treat it the same as SLI3. 
3638 **/ 3639 static void 3640 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 3641 { 3642 struct lpfc_dmabuf *mp; 3643 LPFC_MBOXQ_t *pmb; 3644 int rc; 3645 3646 if (bf_get(lpfc_trailer_type, acqe_fc) != 3647 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 3648 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3649 "2895 Non FC link Event detected.(%d)\n", 3650 bf_get(lpfc_trailer_type, acqe_fc)); 3651 return; 3652 } 3653 /* Keep the link status for extra SLI4 state machine reference */ 3654 phba->sli4_hba.link_state.speed = 3655 bf_get(lpfc_acqe_fc_la_speed, acqe_fc); 3656 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 3657 phba->sli4_hba.link_state.topology = 3658 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 3659 phba->sli4_hba.link_state.status = 3660 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 3661 phba->sli4_hba.link_state.type = 3662 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 3663 phba->sli4_hba.link_state.number = 3664 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 3665 phba->sli4_hba.link_state.fault = 3666 bf_get(lpfc_acqe_link_fault, acqe_fc); 3667 phba->sli4_hba.link_state.logical_speed = 3668 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); 3669 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3670 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 3671 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 3672 "%dMbps Fault:%d\n", 3673 phba->sli4_hba.link_state.speed, 3674 phba->sli4_hba.link_state.topology, 3675 phba->sli4_hba.link_state.status, 3676 phba->sli4_hba.link_state.type, 3677 phba->sli4_hba.link_state.number, 3678 phba->sli4_hba.link_state.logical_speed * 10, 3679 phba->sli4_hba.link_state.fault); 3680 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3681 if (!pmb) { 3682 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3683 "2897 The mboxq allocation failed\n"); 3684 return; 3685 } 3686 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3687 if (!mp) { 3688 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3689 "2898 
The lpfc_dmabuf allocation failed\n"); 3690 goto out_free_pmb; 3691 } 3692 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3693 if (!mp->virt) { 3694 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3695 "2899 The mbuf allocation failed\n"); 3696 goto out_free_dmabuf; 3697 } 3698 3699 /* Cleanup any outstanding ELS commands */ 3700 lpfc_els_flush_all_cmd(phba); 3701 3702 /* Block ELS IOCBs until we have done process link event */ 3703 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3704 3705 /* Update link event statistics */ 3706 phba->sli.slistat.link_event++; 3707 3708 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3709 lpfc_read_topology(phba, pmb, mp); 3710 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3711 pmb->vport = phba->pport; 3712 3713 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3714 if (rc == MBX_NOT_FINISHED) 3715 goto out_free_dmabuf; 3716 return; 3717 3718 out_free_dmabuf: 3719 kfree(mp); 3720 out_free_pmb: 3721 mempool_free(pmb, phba->mbox_mem_pool); 3722 } 3723 3724 /** 3725 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 3726 * @phba: pointer to lpfc hba data structure. 3727 * @acqe_fc: pointer to the async SLI completion queue entry. 3728 * 3729 * This routine is to handle the SLI4 asynchronous SLI events. 
3730 **/ 3731 static void 3732 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 3733 { 3734 char port_name; 3735 char message[80]; 3736 uint8_t status; 3737 struct lpfc_acqe_misconfigured_event *misconfigured; 3738 3739 /* special case misconfigured event as it contains data for all ports */ 3740 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 3741 LPFC_SLI_INTF_IF_TYPE_2) || 3742 (bf_get(lpfc_trailer_type, acqe_sli) != 3743 LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) { 3744 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3745 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 3746 "x%08x SLI Event Type:%d\n", 3747 acqe_sli->event_data1, acqe_sli->event_data2, 3748 bf_get(lpfc_trailer_type, acqe_sli)); 3749 return; 3750 } 3751 3752 port_name = phba->Port[0]; 3753 if (port_name == 0x00) 3754 port_name = '?'; /* get port name is empty */ 3755 3756 misconfigured = (struct lpfc_acqe_misconfigured_event *) 3757 &acqe_sli->event_data1; 3758 3759 /* fetch the status for this port */ 3760 switch (phba->sli4_hba.lnk_info.lnk_no) { 3761 case LPFC_LINK_NUMBER_0: 3762 status = bf_get(lpfc_sli_misconfigured_port0, 3763 &misconfigured->theEvent); 3764 break; 3765 case LPFC_LINK_NUMBER_1: 3766 status = bf_get(lpfc_sli_misconfigured_port1, 3767 &misconfigured->theEvent); 3768 break; 3769 case LPFC_LINK_NUMBER_2: 3770 status = bf_get(lpfc_sli_misconfigured_port2, 3771 &misconfigured->theEvent); 3772 break; 3773 case LPFC_LINK_NUMBER_3: 3774 status = bf_get(lpfc_sli_misconfigured_port3, 3775 &misconfigured->theEvent); 3776 break; 3777 default: 3778 status = ~LPFC_SLI_EVENT_STATUS_VALID; 3779 break; 3780 } 3781 3782 switch (status) { 3783 case LPFC_SLI_EVENT_STATUS_VALID: 3784 return; /* no message if the sfp is okay */ 3785 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 3786 sprintf(message, "Not installed"); 3787 break; 3788 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 3789 sprintf(message, 3790 "Optics of two types installed"); 3791 break; 3792 case 
LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 3793 sprintf(message, "Incompatible optics"); 3794 break; 3795 default: 3796 /* firmware is reporting a status we don't know about */ 3797 sprintf(message, "Unknown event status x%02x", status); 3798 break; 3799 } 3800 3801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3802 "3176 Misconfigured Physical Port - " 3803 "Port Name %c %s\n", port_name, message); 3804 } 3805 3806 /** 3807 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 3808 * @vport: pointer to vport data structure. 3809 * 3810 * This routine is to perform Clear Virtual Link (CVL) on a vport in 3811 * response to a CVL event. 3812 * 3813 * Return the pointer to the ndlp with the vport if successful, otherwise 3814 * return NULL. 3815 **/ 3816 static struct lpfc_nodelist * 3817 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 3818 { 3819 struct lpfc_nodelist *ndlp; 3820 struct Scsi_Host *shost; 3821 struct lpfc_hba *phba; 3822 3823 if (!vport) 3824 return NULL; 3825 phba = vport->phba; 3826 if (!phba) 3827 return NULL; 3828 ndlp = lpfc_findnode_did(vport, Fabric_DID); 3829 if (!ndlp) { 3830 /* Cannot find existing Fabric ndlp, so allocate a new one */ 3831 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3832 if (!ndlp) 3833 return 0; 3834 lpfc_nlp_init(vport, ndlp, Fabric_DID); 3835 /* Set the node type */ 3836 ndlp->nlp_type |= NLP_FABRIC; 3837 /* Put ndlp onto node list */ 3838 lpfc_enqueue_node(vport, ndlp); 3839 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 3840 /* re-setup ndlp without removing from node list */ 3841 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3842 if (!ndlp) 3843 return 0; 3844 } 3845 if ((phba->pport->port_state < LPFC_FLOGI) && 3846 (phba->pport->port_state != LPFC_VPORT_FAILED)) 3847 return NULL; 3848 /* If virtual link is not yet instantiated ignore CVL */ 3849 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 3850 && (vport->port_state != LPFC_VPORT_FAILED)) 3851 return NULL; 3852 shost = 
lpfc_shost_from_vport(vport); 3853 if (!shost) 3854 return NULL; 3855 lpfc_linkdown_port(vport); 3856 lpfc_cleanup_pending_mbox(vport); 3857 spin_lock_irq(shost->host_lock); 3858 vport->fc_flag |= FC_VPORT_CVL_RCVD; 3859 spin_unlock_irq(shost->host_lock); 3860 3861 return ndlp; 3862 } 3863 3864 /** 3865 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 3866 * @vport: pointer to lpfc hba data structure. 3867 * 3868 * This routine is to perform Clear Virtual Link (CVL) on all vports in 3869 * response to a FCF dead event. 3870 **/ 3871 static void 3872 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 3873 { 3874 struct lpfc_vport **vports; 3875 int i; 3876 3877 vports = lpfc_create_vport_work_array(phba); 3878 if (vports) 3879 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3880 lpfc_sli4_perform_vport_cvl(vports[i]); 3881 lpfc_destroy_vport_work_array(phba, vports); 3882 } 3883 3884 /** 3885 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 3886 * @phba: pointer to lpfc hba data structure. 3887 * @acqe_link: pointer to the async fcoe completion queue entry. 3888 * 3889 * This routine is to handle the SLI4 asynchronous fcoe event. 
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			/*
			 * NOTE(review): rc is assigned but not checked here;
			 * confirm a read failure is intentionally ignored on
			 * this best-effort bmask update.
			 */
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2548 FCF Table full count 0x%x tag 0x%x\n",
			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
			acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2549 FCF (x%x) disconnected from network, "
			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mabilbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equalivant to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		/* Check whether any other vport still has a live virtual link */
		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		if (active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mabilbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * the current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0288 Unknown FCoE event type 0x%x event tag "
			"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	/* DCBX events are recorded but not yet acted upon */
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	/* Record the event tag for both FC and FCoE event bookkeeping */
	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	/* Remember the old speed so the transition can be logged */
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
	/* Speeds are stored in 10Mbps units, hence the *10 for display */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
			(phba->sli4_hba.link_state.logical_speed*10));
}

/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
4177 **/ 4178 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 4179 { 4180 struct lpfc_cq_event *cq_event; 4181 4182 /* First, declare the async event has been handled */ 4183 spin_lock_irq(&phba->hbalock); 4184 phba->hba_flag &= ~ASYNC_EVENT; 4185 spin_unlock_irq(&phba->hbalock); 4186 /* Now, handle all the async events */ 4187 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 4188 /* Get the first event from the head of the event queue */ 4189 spin_lock_irq(&phba->hbalock); 4190 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 4191 cq_event, struct lpfc_cq_event, list); 4192 spin_unlock_irq(&phba->hbalock); 4193 /* Process the asynchronous event */ 4194 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 4195 case LPFC_TRAILER_CODE_LINK: 4196 lpfc_sli4_async_link_evt(phba, 4197 &cq_event->cqe.acqe_link); 4198 break; 4199 case LPFC_TRAILER_CODE_FCOE: 4200 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 4201 break; 4202 case LPFC_TRAILER_CODE_DCBX: 4203 lpfc_sli4_async_dcbx_evt(phba, 4204 &cq_event->cqe.acqe_dcbx); 4205 break; 4206 case LPFC_TRAILER_CODE_GRP5: 4207 lpfc_sli4_async_grp5_evt(phba, 4208 &cq_event->cqe.acqe_grp5); 4209 break; 4210 case LPFC_TRAILER_CODE_FC: 4211 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 4212 break; 4213 case LPFC_TRAILER_CODE_SLI: 4214 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 4215 break; 4216 default: 4217 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4218 "1804 Invalid asynchrous event code: " 4219 "x%x\n", bf_get(lpfc_trailer_code, 4220 &cq_event->cqe.mcqe_cmpl)); 4221 break; 4222 } 4223 /* Free the completion event processed to the free pool */ 4224 lpfc_sli4_cq_event_release(phba, cq_event); 4225 } 4226 } 4227 4228 /** 4229 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 4230 * @phba: pointer to lpfc hba data structure. 4231 * 4232 * This routine is invoked by the worker thread to process FCF table 4233 * rediscovery pending completion event. 
4234 **/ 4235 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 4236 { 4237 int rc; 4238 4239 spin_lock_irq(&phba->hbalock); 4240 /* Clear FCF rediscovery timeout event */ 4241 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 4242 /* Clear driver fast failover FCF record flag */ 4243 phba->fcf.failover_rec.flag = 0; 4244 /* Set state for FCF fast failover */ 4245 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 4246 spin_unlock_irq(&phba->hbalock); 4247 4248 /* Scan FCF table from the first entry to re-discover SAN */ 4249 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4250 "2777 Start post-quiescent FCF table scan\n"); 4251 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 4252 if (rc) 4253 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4254 "2747 Issue FCF scan read FCF mailbox " 4255 "command failed 0x%x\n", rc); 4256 } 4257 4258 /** 4259 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 4260 * @phba: pointer to lpfc hba data structure. 4261 * @dev_grp: The HBA PCI-Device group number. 4262 * 4263 * This routine is invoked to set up the per HBA PCI-Device group function 4264 * API jump table entries. 
4265 * 4266 * Return: 0 if success, otherwise -ENODEV 4267 **/ 4268 int 4269 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4270 { 4271 int rc; 4272 4273 /* Set up lpfc PCI-device group */ 4274 phba->pci_dev_grp = dev_grp; 4275 4276 /* The LPFC_PCI_DEV_OC uses SLI4 */ 4277 if (dev_grp == LPFC_PCI_DEV_OC) 4278 phba->sli_rev = LPFC_SLI_REV4; 4279 4280 /* Set up device INIT API function jump table */ 4281 rc = lpfc_init_api_table_setup(phba, dev_grp); 4282 if (rc) 4283 return -ENODEV; 4284 /* Set up SCSI API function jump table */ 4285 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 4286 if (rc) 4287 return -ENODEV; 4288 /* Set up SLI API function jump table */ 4289 rc = lpfc_sli_api_table_setup(phba, dev_grp); 4290 if (rc) 4291 return -ENODEV; 4292 /* Set up MBOX API function jump table */ 4293 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 4294 if (rc) 4295 return -ENODEV; 4296 4297 return 0; 4298 } 4299 4300 /** 4301 * lpfc_log_intr_mode - Log the active interrupt mode 4302 * @phba: pointer to lpfc hba data structure. 4303 * @intr_mode: active interrupt mode adopted. 4304 * 4305 * This routine it invoked to log the currently used active interrupt mode 4306 * to the device. 4307 **/ 4308 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 4309 { 4310 switch (intr_mode) { 4311 case 0: 4312 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4313 "0470 Enable INTx interrupt mode.\n"); 4314 break; 4315 case 1: 4316 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4317 "0481 Enabled MSI interrupt mode.\n"); 4318 break; 4319 case 2: 4320 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4321 "0480 Enabled MSI-X interrupt mode.\n"); 4322 break; 4323 default: 4324 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4325 "0482 Illegal interrupt mode.\n"); 4326 break; 4327 } 4328 return; 4329 } 4330 4331 /** 4332 * lpfc_enable_pci_dev - Enable a generic PCI device. 4333 * @phba: pointer to lpfc hba data structure. 
4334 * 4335 * This routine is invoked to enable the PCI device that is common to all 4336 * PCI devices. 4337 * 4338 * Return codes 4339 * 0 - successful 4340 * other values - error 4341 **/ 4342 static int 4343 lpfc_enable_pci_dev(struct lpfc_hba *phba) 4344 { 4345 struct pci_dev *pdev; 4346 int bars = 0; 4347 4348 /* Obtain PCI device reference */ 4349 if (!phba->pcidev) 4350 goto out_error; 4351 else 4352 pdev = phba->pcidev; 4353 /* Select PCI BARs */ 4354 bars = pci_select_bars(pdev, IORESOURCE_MEM); 4355 /* Enable PCI device */ 4356 if (pci_enable_device_mem(pdev)) 4357 goto out_error; 4358 /* Request PCI resource for the device */ 4359 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 4360 goto out_disable_device; 4361 /* Set up device as PCI master and save state for EEH */ 4362 pci_set_master(pdev); 4363 pci_try_set_mwi(pdev); 4364 pci_save_state(pdev); 4365 4366 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 4367 if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) 4368 pdev->needs_freset = 1; 4369 4370 return 0; 4371 4372 out_disable_device: 4373 pci_disable_device(pdev); 4374 out_error: 4375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4376 "1401 Failed to enable pci device, bars:x%x\n", bars); 4377 return -ENODEV; 4378 } 4379 4380 /** 4381 * lpfc_disable_pci_dev - Disable a generic PCI device. 4382 * @phba: pointer to lpfc hba data structure. 4383 * 4384 * This routine is invoked to disable the PCI device that is common to all 4385 * PCI devices. 
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference; nothing to do if never attached */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Select the same memory BARs that lpfc_enable_pci_dev claimed */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Release PCI resource and disable PCI device */
	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
	/* Null out PCI private reference to driver */
	pci_set_drvdata(pdev, NULL);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * on outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	/* Quiesce, restart the board, and bring it back online */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCIe SR-IOV extended capability of the attached
 * PCI function and returns its TotalVFs value, i.e. the maximum number of
 * virtual functions the device supports. It returns 0 when the function
 * carries no SR-IOV capability.
4442 **/ 4443 uint16_t 4444 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 4445 { 4446 struct pci_dev *pdev = phba->pcidev; 4447 uint16_t nr_virtfn; 4448 int pos; 4449 4450 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 4451 if (pos == 0) 4452 return 0; 4453 4454 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 4455 return nr_virtfn; 4456 } 4457 4458 /** 4459 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 4460 * @phba: pointer to lpfc hba data structure. 4461 * @nr_vfn: number of virtual functions to be enabled. 4462 * 4463 * This function enables the PCI SR-IOV virtual functions to a physical 4464 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 4465 * enable the number of virtual functions to the physical function. As 4466 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 4467 * API call does not considered as an error condition for most of the device. 4468 **/ 4469 int 4470 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 4471 { 4472 struct pci_dev *pdev = phba->pcidev; 4473 uint16_t max_nr_vfn; 4474 int rc; 4475 4476 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 4477 if (nr_vfn > max_nr_vfn) { 4478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4479 "3057 Requested vfs (%d) greater than " 4480 "supported vfs (%d)", nr_vfn, max_nr_vfn); 4481 return -EINVAL; 4482 } 4483 4484 rc = pci_enable_sriov(pdev, nr_vfn); 4485 if (rc) { 4486 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4487 "2806 Failed to enable sriov on this device " 4488 "with vfn number nr_vf:%d, rc:%d\n", 4489 nr_vfn, rc); 4490 } else 4491 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4492 "2807 Successful enable sriov on this device " 4493 "with vfn number nr_vf:%d\n", nr_vfn); 4494 return rc; 4495 } 4496 4497 /** 4498 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 4499 * @phba: pointer to lpfc hba data structure. 
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup: error, mailbox, link attention
	 * plus receive events on the ELS ring.
	 */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	/* Allocate the SLI-3 ring array once; kept across re-initialization */
	if (!phba->sli.ring)
		phba->sli.ring = (struct lpfc_sli_ring *)
			kzalloc(LPFC_SLI3_MAX_RING *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 * 2 segments are added since the IOCB needs a command and response bde.
	 */
	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
		sizeof(struct fcp_rsp) +
		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

	/* BlockGuard needs room for the protection-data scatter list too */
	if (phba->cfg_enable_bg) {
		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
		phba->cfg_sg_dma_buf_size +=
			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
	}

	/* Also reinitialize the host templates with new values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter. Failure here is non-fatal: the
	 * request is simply dropped and the port runs without VFs.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
4617 * 4618 * This routine is invoked to unset the driver internal resources set up 4619 * specific for supporting the SLI-3 HBA device it attached to. 4620 **/ 4621 static void 4622 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 4623 { 4624 /* Free device driver memory allocated */ 4625 lpfc_mem_free_all(phba); 4626 4627 return; 4628 } 4629 4630 /** 4631 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 4632 * @phba: pointer to lpfc hba data structure. 4633 * 4634 * This routine is invoked to set up the driver internal resources specific to 4635 * support the SLI-4 HBA device it attached to. 4636 * 4637 * Return codes 4638 * 0 - successful 4639 * other values - error 4640 **/ 4641 static int 4642 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 4643 { 4644 struct lpfc_sli *psli; 4645 LPFC_MBOXQ_t *mboxq; 4646 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 4647 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4648 struct lpfc_mqe *mqe; 4649 int longs, sli_family; 4650 int sges_per_segment; 4651 4652 /* Before proceed, wait for POST done and device ready */ 4653 rc = lpfc_sli4_post_status_check(phba); 4654 if (rc) 4655 return -ENODEV; 4656 4657 /* 4658 * Initialize timers used by driver 4659 */ 4660 4661 /* Heartbeat timer */ 4662 init_timer(&phba->hb_tmofunc); 4663 phba->hb_tmofunc.function = lpfc_hb_timeout; 4664 phba->hb_tmofunc.data = (unsigned long)phba; 4665 init_timer(&phba->rrq_tmr); 4666 phba->rrq_tmr.function = lpfc_rrq_timeout; 4667 phba->rrq_tmr.data = (unsigned long)phba; 4668 4669 psli = &phba->sli; 4670 /* MBOX heartbeat timer */ 4671 init_timer(&psli->mbox_tmo); 4672 psli->mbox_tmo.function = lpfc_mbox_timeout; 4673 psli->mbox_tmo.data = (unsigned long) phba; 4674 /* Fabric block timer */ 4675 init_timer(&phba->fabric_block_timer); 4676 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 4677 phba->fabric_block_timer.data = (unsigned long) phba; 4678 /* EA polling mode timer */ 4679 
init_timer(&phba->eratt_poll); 4680 phba->eratt_poll.function = lpfc_poll_eratt; 4681 phba->eratt_poll.data = (unsigned long) phba; 4682 /* FCF rediscover timer */ 4683 init_timer(&phba->fcf.redisc_wait); 4684 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 4685 phba->fcf.redisc_wait.data = (unsigned long)phba; 4686 4687 /* 4688 * Control structure for handling external multi-buffer mailbox 4689 * command pass-through. 4690 */ 4691 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 4692 sizeof(struct lpfc_mbox_ext_buf_ctx)); 4693 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 4694 4695 /* 4696 * We need to do a READ_CONFIG mailbox command here before 4697 * calling lpfc_get_cfgparam. For VFs this will report the 4698 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 4699 * All of the resources allocated 4700 * for this Port are tied to these values. 4701 */ 4702 /* Get all the module params for configuring this host */ 4703 lpfc_get_cfgparam(phba); 4704 phba->max_vpi = LPFC_MAX_VPI; 4705 4706 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */ 4707 phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count; 4708 4709 /* This will be set to correct value after the read_config mbox */ 4710 phba->max_vports = 0; 4711 4712 /* Program the default value of vlan_id and fc_map */ 4713 phba->valid_vlan = 0; 4714 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4715 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4716 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4717 4718 /* With BlockGuard we can have multiple SGEs per Data Segemnt */ 4719 sges_per_segment = 1; 4720 if (phba->cfg_enable_bg) 4721 sges_per_segment = 2; 4722 4723 /* 4724 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 4725 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. 
4726 */ 4727 if (!phba->sli.ring) 4728 phba->sli.ring = kzalloc( 4729 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) * 4730 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 4731 if (!phba->sli.ring) 4732 return -ENOMEM; 4733 /* 4734 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4735 * used to create the sg_dma_buf_pool must be dynamically calculated. 4736 * 2 segments are added since the IOCB needs a command and response bde. 4737 * To insure that the scsi sgl does not cross a 4k page boundary only 4738 * sgl sizes of must be a power of 2. 4739 */ 4740 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 4741 (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) * 4742 sizeof(struct sli4_sge))); 4743 4744 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 4745 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; 4746 switch (sli_family) { 4747 case LPFC_SLI_INTF_FAMILY_BE2: 4748 case LPFC_SLI_INTF_FAMILY_BE3: 4749 /* There is a single hint for BE - 2 pages per BPL. 
*/ 4750 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == 4751 LPFC_SLI_INTF_SLI_HINT1_1) 4752 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; 4753 break; 4754 case LPFC_SLI_INTF_FAMILY_LNCR_A0: 4755 case LPFC_SLI_INTF_FAMILY_LNCR_B0: 4756 default: 4757 break; 4758 } 4759 4760 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; 4761 dma_buf_size < max_buf_size && buf_size > dma_buf_size; 4762 dma_buf_size = dma_buf_size << 1) 4763 ; 4764 if (dma_buf_size == max_buf_size) 4765 phba->cfg_sg_seg_cnt = (dma_buf_size - 4766 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - 4767 (2 * sizeof(struct sli4_sge))) / 4768 sizeof(struct sli4_sge); 4769 phba->cfg_sg_dma_buf_size = dma_buf_size; 4770 4771 /* Initialize buffer queue management fields */ 4772 hbq_count = lpfc_sli_hbq_count(); 4773 for (i = 0; i < hbq_count; ++i) 4774 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 4775 INIT_LIST_HEAD(&phba->rb_pend_list); 4776 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 4777 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 4778 4779 /* 4780 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
4781 */ 4782 /* Initialize the Abort scsi buffer list used by driver */ 4783 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 4784 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 4785 /* This abort list used by worker thread */ 4786 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 4787 4788 /* 4789 * Initialize driver internal slow-path work queues 4790 */ 4791 4792 /* Driver internel slow-path CQ Event pool */ 4793 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 4794 /* Response IOCB work queue list */ 4795 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 4796 /* Asynchronous event CQ Event work queue list */ 4797 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 4798 /* Fast-path XRI aborted CQ Event work queue list */ 4799 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 4800 /* Slow-path XRI aborted CQ Event work queue list */ 4801 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 4802 /* Receive queue CQ Event work queue list */ 4803 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 4804 4805 /* Initialize extent block lists. */ 4806 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 4807 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 4808 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 4809 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 4810 4811 /* Initialize the driver internal SLI layer lists. */ 4812 lpfc_sli_setup(phba); 4813 lpfc_sli_queue_setup(phba); 4814 4815 /* Allocate device driver memory */ 4816 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 4817 if (rc) 4818 return -ENOMEM; 4819 4820 /* IF Type 2 ports get initialized now. 
*/ 4821 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 4822 LPFC_SLI_INTF_IF_TYPE_2) { 4823 rc = lpfc_pci_function_reset(phba); 4824 if (unlikely(rc)) 4825 return -ENODEV; 4826 } 4827 4828 /* Create the bootstrap mailbox command */ 4829 rc = lpfc_create_bootstrap_mbox(phba); 4830 if (unlikely(rc)) 4831 goto out_free_mem; 4832 4833 /* Set up the host's endian order with the device. */ 4834 rc = lpfc_setup_endian_order(phba); 4835 if (unlikely(rc)) 4836 goto out_free_bsmbx; 4837 4838 /* Set up the hba's configuration parameters. */ 4839 rc = lpfc_sli4_read_config(phba); 4840 if (unlikely(rc)) 4841 goto out_free_bsmbx; 4842 4843 /* IF Type 0 ports get initialized now. */ 4844 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 4845 LPFC_SLI_INTF_IF_TYPE_0) { 4846 rc = lpfc_pci_function_reset(phba); 4847 if (unlikely(rc)) 4848 goto out_free_bsmbx; 4849 } 4850 4851 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4852 GFP_KERNEL); 4853 if (!mboxq) { 4854 rc = -ENOMEM; 4855 goto out_free_bsmbx; 4856 } 4857 4858 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 4859 lpfc_supported_pages(mboxq); 4860 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4861 if (!rc) { 4862 mqe = &mboxq->u.mqe; 4863 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 4864 LPFC_MAX_SUPPORTED_PAGES); 4865 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 4866 switch (pn_page[i]) { 4867 case LPFC_SLI4_PARAMETERS: 4868 phba->sli4_hba.pc_sli4_params.supported = 1; 4869 break; 4870 default: 4871 break; 4872 } 4873 } 4874 /* Read the port's SLI4 Parameters capabilities if supported. */ 4875 if (phba->sli4_hba.pc_sli4_params.supported) 4876 rc = lpfc_pc_sli4_params_get(phba, mboxq); 4877 if (rc) { 4878 mempool_free(mboxq, phba->mbox_mem_pool); 4879 rc = -EIO; 4880 goto out_free_bsmbx; 4881 } 4882 } 4883 /* 4884 * Get sli4 parameters that override parameters from Port capabilities. 
4885 * If this call fails, it isn't critical unless the SLI4 parameters come 4886 * back in conflict. 4887 */ 4888 rc = lpfc_get_sli4_parameters(phba, mboxq); 4889 if (rc) { 4890 if (phba->sli4_hba.extents_in_use && 4891 phba->sli4_hba.rpi_hdrs_in_use) { 4892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4893 "2999 Unsupported SLI4 Parameters " 4894 "Extents and RPI headers enabled.\n"); 4895 goto out_free_bsmbx; 4896 } 4897 } 4898 mempool_free(mboxq, phba->mbox_mem_pool); 4899 /* Verify all the SLI4 queues */ 4900 rc = lpfc_sli4_queue_verify(phba); 4901 if (rc) 4902 goto out_free_bsmbx; 4903 4904 /* Create driver internal CQE event pool */ 4905 rc = lpfc_sli4_cq_event_pool_create(phba); 4906 if (rc) 4907 goto out_free_bsmbx; 4908 4909 /* Initialize sgl lists per host */ 4910 lpfc_init_sgl_list(phba); 4911 4912 /* Allocate and initialize active sgl array */ 4913 rc = lpfc_init_active_sgl_array(phba); 4914 if (rc) { 4915 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4916 "1430 Failed to initialize sgl list.\n"); 4917 goto out_destroy_cq_event_pool; 4918 } 4919 rc = lpfc_sli4_init_rpi_hdrs(phba); 4920 if (rc) { 4921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4922 "1432 Failed to initialize rpi headers.\n"); 4923 goto out_free_active_sgl; 4924 } 4925 4926 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 4927 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4928 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4929 GFP_KERNEL); 4930 if (!phba->fcf.fcf_rr_bmask) { 4931 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4932 "2759 Failed allocate memory for FCF round " 4933 "robin failover bmask\n"); 4934 rc = -ENOMEM; 4935 goto out_remove_rpi_hdrs; 4936 } 4937 4938 phba->sli4_hba.fcp_eq_hdl = 4939 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4940 phba->cfg_fcp_io_channel), GFP_KERNEL); 4941 if (!phba->sli4_hba.fcp_eq_hdl) { 4942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4943 "2572 Failed allocate memory for " 4944 "fast-path per-EQ 
handle array\n"); 4945 rc = -ENOMEM; 4946 goto out_free_fcf_rr_bmask; 4947 } 4948 4949 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4950 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 4951 if (!phba->sli4_hba.msix_entries) { 4952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4953 "2573 Failed allocate memory for msi-x " 4954 "interrupt vector entries\n"); 4955 rc = -ENOMEM; 4956 goto out_free_fcp_eq_hdl; 4957 } 4958 4959 /* 4960 * Enable sr-iov virtual functions if supported and configured 4961 * through the module parameter. 4962 */ 4963 if (phba->cfg_sriov_nr_virtfn > 0) { 4964 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 4965 phba->cfg_sriov_nr_virtfn); 4966 if (rc) { 4967 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4968 "3020 Requested number of SR-IOV " 4969 "virtual functions (%d) is not " 4970 "supported\n", 4971 phba->cfg_sriov_nr_virtfn); 4972 phba->cfg_sriov_nr_virtfn = 0; 4973 } 4974 } 4975 4976 return 0; 4977 4978 out_free_fcp_eq_hdl: 4979 kfree(phba->sli4_hba.fcp_eq_hdl); 4980 out_free_fcf_rr_bmask: 4981 kfree(phba->fcf.fcf_rr_bmask); 4982 out_remove_rpi_hdrs: 4983 lpfc_sli4_remove_rpi_hdrs(phba); 4984 out_free_active_sgl: 4985 lpfc_free_active_sgl(phba); 4986 out_destroy_cq_event_pool: 4987 lpfc_sli4_cq_event_pool_destroy(phba); 4988 out_free_bsmbx: 4989 lpfc_destroy_bootstrap_mbox(phba); 4990 out_free_mem: 4991 lpfc_mem_free(phba); 4992 return rc; 4993 } 4994 4995 /** 4996 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 4997 * @phba: pointer to lpfc hba data structure. 4998 * 4999 * This routine is invoked to unset the driver internal resources set up 5000 * specific for supporting the SLI-4 HBA device it attached to. 
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/*
	 * Teardown runs roughly in the reverse order of the SLI4 resource
	 * setup.  kfree(NULL) is a no-op and the list walk tolerates an
	 * empty list, so this is safe even after a partially failed setup.
	 */

	/* Free memory allocated for msi-x interrupt vector entries */
	kfree(phba->sli4_hba.msix_entries);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.fcp_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list: active-array first, then the sgl entries */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table; entries were kmalloc'd singly */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
5056 **/ 5057 int 5058 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5059 { 5060 phba->lpfc_hba_init_link = lpfc_hba_init_link; 5061 phba->lpfc_hba_down_link = lpfc_hba_down_link; 5062 phba->lpfc_selective_reset = lpfc_selective_reset; 5063 switch (dev_grp) { 5064 case LPFC_PCI_DEV_LP: 5065 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 5066 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 5067 phba->lpfc_stop_port = lpfc_stop_port_s3; 5068 break; 5069 case LPFC_PCI_DEV_OC: 5070 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 5071 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 5072 phba->lpfc_stop_port = lpfc_stop_port_s4; 5073 break; 5074 default: 5075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5076 "1431 Invalid HBA PCI-device group: 0x%x\n", 5077 dev_grp); 5078 return -ENODEV; 5079 break; 5080 } 5081 return 0; 5082 } 5083 5084 /** 5085 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 5086 * @phba: pointer to lpfc hba data structure. 5087 * 5088 * This routine is invoked to set up the driver internal resources before the 5089 * device specific resource setup to support the HBA device it attached to. 
5090 * 5091 * Return codes 5092 * 0 - successful 5093 * other values - error 5094 **/ 5095 static int 5096 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 5097 { 5098 /* 5099 * Driver resources common to all SLI revisions 5100 */ 5101 atomic_set(&phba->fast_event_count, 0); 5102 spin_lock_init(&phba->hbalock); 5103 5104 /* Initialize ndlp management spinlock */ 5105 spin_lock_init(&phba->ndlp_lock); 5106 5107 INIT_LIST_HEAD(&phba->port_list); 5108 INIT_LIST_HEAD(&phba->work_list); 5109 init_waitqueue_head(&phba->wait_4_mlo_m_q); 5110 5111 /* Initialize the wait queue head for the kernel thread */ 5112 init_waitqueue_head(&phba->work_waitq); 5113 5114 /* Initialize the scsi buffer list used by driver for scsi IO */ 5115 spin_lock_init(&phba->scsi_buf_list_lock); 5116 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 5117 5118 /* Initialize the fabric iocb list */ 5119 INIT_LIST_HEAD(&phba->fabric_iocb_list); 5120 5121 /* Initialize list to save ELS buffers */ 5122 INIT_LIST_HEAD(&phba->elsbuf); 5123 5124 /* Initialize FCF connection rec list */ 5125 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 5126 5127 return 0; 5128 } 5129 5130 /** 5131 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 5132 * @phba: pointer to lpfc hba data structure. 5133 * 5134 * This routine is invoked to set up the driver internal resources after the 5135 * device specific resource setup to support the HBA device it attached to. 5136 * 5137 * Return codes 5138 * 0 - successful 5139 * other values - error 5140 **/ 5141 static int 5142 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 5143 { 5144 int error; 5145 5146 /* Startup the kernel thread for this host adapter. 
*/ 5147 phba->worker_thread = kthread_run(lpfc_do_work, phba, 5148 "lpfc_worker_%d", phba->brd_no); 5149 if (IS_ERR(phba->worker_thread)) { 5150 error = PTR_ERR(phba->worker_thread); 5151 return error; 5152 } 5153 5154 return 0; 5155 } 5156 5157 /** 5158 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 5159 * @phba: pointer to lpfc hba data structure. 5160 * 5161 * This routine is invoked to unset the driver internal resources set up after 5162 * the device specific resource setup for supporting the HBA device it 5163 * attached to. 5164 **/ 5165 static void 5166 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 5167 { 5168 /* Stop kernel worker thread */ 5169 kthread_stop(phba->worker_thread); 5170 } 5171 5172 /** 5173 * lpfc_free_iocb_list - Free iocb list. 5174 * @phba: pointer to lpfc hba data structure. 5175 * 5176 * This routine is invoked to free the driver's IOCB list and memory. 5177 **/ 5178 static void 5179 lpfc_free_iocb_list(struct lpfc_hba *phba) 5180 { 5181 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 5182 5183 spin_lock_irq(&phba->hbalock); 5184 list_for_each_entry_safe(iocbq_entry, iocbq_next, 5185 &phba->lpfc_iocb_list, list) { 5186 list_del(&iocbq_entry->list); 5187 kfree(iocbq_entry); 5188 phba->total_iocbq_bufs--; 5189 } 5190 spin_unlock_irq(&phba->hbalock); 5191 5192 return; 5193 } 5194 5195 /** 5196 * lpfc_init_iocb_list - Allocate and initialize iocb list. 5197 * @phba: pointer to lpfc hba data structure. 5198 * 5199 * This routine is invoked to allocate and initizlize the driver's IOCB 5200 * list and set up the IOCB tag array accordingly. 5201 * 5202 * Return codes 5203 * 0 - successful 5204 * other values - error 5205 **/ 5206 static int 5207 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 5208 { 5209 struct lpfc_iocbq *iocbq_entry = NULL; 5210 uint16_t iotag; 5211 int i; 5212 5213 /* Initialize and populate the iocb list per host. 
*/ 5214 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 5215 for (i = 0; i < iocb_count; i++) { 5216 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 5217 if (iocbq_entry == NULL) { 5218 printk(KERN_ERR "%s: only allocated %d iocbs of " 5219 "expected %d count. Unloading driver.\n", 5220 __func__, i, LPFC_IOCB_LIST_CNT); 5221 goto out_free_iocbq; 5222 } 5223 5224 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 5225 if (iotag == 0) { 5226 kfree(iocbq_entry); 5227 printk(KERN_ERR "%s: failed to allocate IOTAG. " 5228 "Unloading driver.\n", __func__); 5229 goto out_free_iocbq; 5230 } 5231 iocbq_entry->sli4_lxritag = NO_XRI; 5232 iocbq_entry->sli4_xritag = NO_XRI; 5233 5234 spin_lock_irq(&phba->hbalock); 5235 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 5236 phba->total_iocbq_bufs++; 5237 spin_unlock_irq(&phba->hbalock); 5238 } 5239 5240 return 0; 5241 5242 out_free_iocbq: 5243 lpfc_free_iocb_list(phba); 5244 5245 return -ENOMEM; 5246 } 5247 5248 /** 5249 * lpfc_free_sgl_list - Free a given sgl list. 5250 * @phba: pointer to lpfc hba data structure. 5251 * @sglq_list: pointer to the head of sgl list. 5252 * 5253 * This routine is invoked to free a give sgl list and memory. 5254 **/ 5255 void 5256 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 5257 { 5258 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 5259 5260 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 5261 list_del(&sglq_entry->list); 5262 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 5263 kfree(sglq_entry); 5264 } 5265 } 5266 5267 /** 5268 * lpfc_free_els_sgl_list - Free els sgl list. 5269 * @phba: pointer to lpfc hba data structure. 5270 * 5271 * This routine is invoked to free the driver's els sgl list and memory. 
5272 **/ 5273 static void 5274 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 5275 { 5276 LIST_HEAD(sglq_list); 5277 5278 /* Retrieve all els sgls from driver list */ 5279 spin_lock_irq(&phba->hbalock); 5280 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 5281 spin_unlock_irq(&phba->hbalock); 5282 5283 /* Now free the sgl list */ 5284 lpfc_free_sgl_list(phba, &sglq_list); 5285 } 5286 5287 /** 5288 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 5289 * @phba: pointer to lpfc hba data structure. 5290 * 5291 * This routine is invoked to allocate the driver's active sgl memory. 5292 * This array will hold the sglq_entry's for active IOs. 5293 **/ 5294 static int 5295 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 5296 { 5297 int size; 5298 size = sizeof(struct lpfc_sglq *); 5299 size *= phba->sli4_hba.max_cfg_param.max_xri; 5300 5301 phba->sli4_hba.lpfc_sglq_active_list = 5302 kzalloc(size, GFP_KERNEL); 5303 if (!phba->sli4_hba.lpfc_sglq_active_list) 5304 return -ENOMEM; 5305 return 0; 5306 } 5307 5308 /** 5309 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 5310 * @phba: pointer to lpfc hba data structure. 5311 * 5312 * This routine is invoked to walk through the array of active sglq entries 5313 * and free all of the resources. 5314 * This is just a place holder for now. 5315 **/ 5316 static void 5317 lpfc_free_active_sgl(struct lpfc_hba *phba) 5318 { 5319 kfree(phba->sli4_hba.lpfc_sglq_active_list); 5320 } 5321 5322 /** 5323 * lpfc_init_sgl_list - Allocate and initialize sgl list. 5324 * @phba: pointer to lpfc hba data structure. 5325 * 5326 * This routine is invoked to allocate and initizlize the driver's sgl 5327 * list and set up the sgl xritag tag array accordingly. 5328 * 5329 **/ 5330 static void 5331 lpfc_init_sgl_list(struct lpfc_hba *phba) 5332 { 5333 /* Initialize and populate the sglq list per host/VF. 
*/ 5334 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 5335 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 5336 5337 /* els xri-sgl book keeping */ 5338 phba->sli4_hba.els_xri_cnt = 0; 5339 5340 /* scsi xri-buffer book keeping */ 5341 phba->sli4_hba.scsi_xri_cnt = 0; 5342 } 5343 5344 /** 5345 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 5346 * @phba: pointer to lpfc hba data structure. 5347 * 5348 * This routine is invoked to post rpi header templates to the 5349 * port for those SLI4 ports that do not support extents. This routine 5350 * posts a PAGE_SIZE memory region to the port to hold up to 5351 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 5352 * and should be called only when interrupts are disabled. 5353 * 5354 * Return codes 5355 * 0 - successful 5356 * -ERROR - otherwise. 5357 **/ 5358 int 5359 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 5360 { 5361 int rc = 0; 5362 struct lpfc_rpi_hdr *rpi_hdr; 5363 5364 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 5365 if (!phba->sli4_hba.rpi_hdrs_in_use) 5366 return rc; 5367 if (phba->sli4_hba.extents_in_use) 5368 return -EIO; 5369 5370 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 5371 if (!rpi_hdr) { 5372 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5373 "0391 Error during rpi post operation\n"); 5374 lpfc_sli4_remove_rpis(phba); 5375 rc = -ENODEV; 5376 } 5377 5378 return rc; 5379 } 5380 5381 /** 5382 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 5383 * @phba: pointer to lpfc hba data structure. 5384 * 5385 * This routine is invoked to allocate a single 4KB memory region to 5386 * support rpis and stores them in the phba. This single region 5387 * provides support for up to 64 rpis. The region is used globally 5388 * by the device. 5389 * 5390 * Returns: 5391 * A valid rpi hdr on success. 5392 * A NULL pointer on any failure. 
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;
	uint32_t rpi_count;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required. Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
	phba->sli4_hba.max_cfg_param.max_rpi - 1;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block. The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The port has a limited number of rpis. The increment here
	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
	 * and to allow the full max_rpi range per port.
	 *
	 * NOTE(review): next_rpi is sampled above under hbalock but only
	 * advanced further below under a second lock acquisition; a
	 * concurrent caller could observe the same curr_rpi_range.  It
	 * looks like callers serialize this path externally - confirm.
	 */
	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
		rpi_count = rpi_limit - curr_rpi_range;
	else
		rpi_count = LPFC_RPI_HDR_COUNT;

	if (!rpi_count)
		return NULL;
	/*
	 * First allocate the protocol header region for the port. The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
	/* The port requires the region to be naturally 4K aligned. */
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * The next_rpi stores the next logical module-64 rpi value used
	 * to post physical rpis in subsequent rpi postings.
	 */
	phba->sli4_hba.next_rpi += rpi_count;
	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}

/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
5498 **/ 5499 void 5500 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 5501 { 5502 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 5503 5504 if (!phba->sli4_hba.rpi_hdrs_in_use) 5505 goto exit; 5506 5507 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 5508 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 5509 list_del(&rpi_hdr->list); 5510 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 5511 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 5512 kfree(rpi_hdr->dmabuf); 5513 kfree(rpi_hdr); 5514 } 5515 exit: 5516 /* There are no rpis available to the port now. */ 5517 phba->sli4_hba.next_rpi = 0; 5518 } 5519 5520 /** 5521 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 5522 * @pdev: pointer to pci device data structure. 5523 * 5524 * This routine is invoked to allocate the driver hba data structure for an 5525 * HBA device. If the allocation is successful, the phba reference to the 5526 * PCI device data structure is set. 5527 * 5528 * Return codes 5529 * pointer to @phba - successful 5530 * NULL - error 5531 **/ 5532 static struct lpfc_hba * 5533 lpfc_hba_alloc(struct pci_dev *pdev) 5534 { 5535 struct lpfc_hba *phba; 5536 5537 /* Allocate memory for HBA structure */ 5538 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 5539 if (!phba) { 5540 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 5541 return NULL; 5542 } 5543 5544 /* Set reference to PCI device in HBA structure */ 5545 phba->pcidev = pdev; 5546 5547 /* Assign an unused board number */ 5548 phba->brd_no = lpfc_get_instance(); 5549 if (phba->brd_no < 0) { 5550 kfree(phba); 5551 return NULL; 5552 } 5553 5554 spin_lock_init(&phba->ct_ev_lock); 5555 INIT_LIST_HEAD(&phba->ct_ev_waiters); 5556 5557 return phba; 5558 } 5559 5560 /** 5561 * lpfc_hba_free - Free driver hba data structure with a device. 5562 * @phba: pointer to lpfc hba data structure. 5563 * 5564 * This routine is invoked to free the driver hba data structure with an 5565 * HBA device. 
5566 **/ 5567 static void 5568 lpfc_hba_free(struct lpfc_hba *phba) 5569 { 5570 /* Release the driver assigned board number */ 5571 idr_remove(&lpfc_hba_index, phba->brd_no); 5572 5573 /* Free memory allocated with sli rings */ 5574 kfree(phba->sli.ring); 5575 phba->sli.ring = NULL; 5576 5577 kfree(phba); 5578 return; 5579 } 5580 5581 /** 5582 * lpfc_create_shost - Create hba physical port with associated scsi host. 5583 * @phba: pointer to lpfc hba data structure. 5584 * 5585 * This routine is invoked to create HBA physical port and associate a SCSI 5586 * host with it. 5587 * 5588 * Return codes 5589 * 0 - successful 5590 * other values - error 5591 **/ 5592 static int 5593 lpfc_create_shost(struct lpfc_hba *phba) 5594 { 5595 struct lpfc_vport *vport; 5596 struct Scsi_Host *shost; 5597 5598 /* Initialize HBA FC structure */ 5599 phba->fc_edtov = FF_DEF_EDTOV; 5600 phba->fc_ratov = FF_DEF_RATOV; 5601 phba->fc_altov = FF_DEF_ALTOV; 5602 phba->fc_arbtov = FF_DEF_ARBTOV; 5603 5604 atomic_set(&phba->sdev_cnt, 0); 5605 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 5606 if (!vport) 5607 return -ENODEV; 5608 5609 shost = lpfc_shost_from_vport(vport); 5610 phba->pport = vport; 5611 lpfc_debugfs_initialize(vport); 5612 /* Put reference to SCSI host to driver's device private data */ 5613 pci_set_drvdata(phba->pcidev, shost); 5614 5615 return 0; 5616 } 5617 5618 /** 5619 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 5620 * @phba: pointer to lpfc hba data structure. 5621 * 5622 * This routine is invoked to destroy HBA physical port and the associated 5623 * SCSI host. 5624 **/ 5625 static void 5626 lpfc_destroy_shost(struct lpfc_hba *phba) 5627 { 5628 struct lpfc_vport *vport = phba->pport; 5629 5630 /* Destroy physical port that associated with the SCSI host */ 5631 destroy_port(vport); 5632 5633 return; 5634 } 5635 5636 /** 5637 * lpfc_setup_bg - Setup Block guard structures and debug areas. 
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;

	/* pagecnt is a page-allocation *order*: 2^pagecnt pages */
	int pagecnt = 10;
	if (lpfc_prot_mask && lpfc_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		old_mask = lpfc_prot_mask;
		old_guard = lpfc_prot_guard;

		/* Only allow supported values */
		lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
			SHOST_DIX_TYPE0_PROTECTION |
			SHOST_DIX_TYPE1_PROTECTION);
		lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
		if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (lpfc_prot_mask && lpfc_prot_guard) {
			/* Tell the user when sanitizing changed a value */
			if ((old_mask != lpfc_prot_mask) ||
				(old_guard != lpfc_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d guard %d\n",
					lpfc_prot_mask, lpfc_prot_guard);

			scsi_host_set_prot(shost, lpfc_prot_mask);
			scsi_host_set_guard(shost, lpfc_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}

	/*
	 * Allocate the (module-global) hexdump buffers, retrying at
	 * successively smaller orders until an allocation succeeds.
	 */
	if (!_dump_buf_data) {
		while (pagecnt) {
			/*
			 * NOTE(review): the lock is re-initialized on every
			 * retry iteration; one-time init outside the loop
			 * would suffice - confirm no concurrent holder.
			 */
			spin_lock_init(&_dump_buf_lock);
			_dump_buf_data =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9043 BLKGRD: allocated %d pages for "
					"_dump_buf_data at 0x%p\n",
					(1 << pagecnt), _dump_buf_data);
				_dump_buf_data_order = pagecnt;
				memset(_dump_buf_data, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_data_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9044 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
			"\n", _dump_buf_data);
	if (!_dump_buf_dif) {
		/* pagecnt carries over: start at the order that worked */
		while (pagecnt) {
			_dump_buf_dif =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_dif) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9046 BLKGRD: allocated %d pages for "
					"_dump_buf_dif at 0x%p\n",
					(1 << pagecnt), _dump_buf_dif);
				_dump_buf_dif_order = pagecnt;
				memset(_dump_buf_dif, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_dif_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9047 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
			_dump_buf_dif);
}

/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}

/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size: prefer 64-bit, fall back to 32 */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
					       SLI2_SLIM_SIZE,
					       &phba->slim2p.phys,
					       GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
	/* Carve the SLI-2 SLIM region into its fixed sub-areas */
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	/* Hand each HBQ its slice of the shared HBQ slim region */
	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	INIT_LIST_HEAD(&phba->rb_pend_list);

	/* Derive the register addresses from the two BAR mappings */
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

	/* Error labels unwind the acquisitions above in reverse order */
out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds (3000 x 10ms) for POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid. Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process. Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			/* if_type 0: unmasked UE bits indicate failure */
			phba->sli4_hba.ue_mask_lo =
			    readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			    readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			    readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			    readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
			/* Final checks. The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		/* if_type 0: BAR0 exposes the unrecoverable-error (UE)
		 * status/mask registers and the SLI interface register.
		 */
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		/* if_type 2: BAR0 carries the port control/status/error
		 * registers, the semaphore register, all queue doorbells
		 * and the bootstrap mailbox register.
		 */
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
		phba->sli4_hba.EQCQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		/* if_type 1 (and anything unknown) is not supported by
		 * this driver; leave the map unset and report loudly.
		 */
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	/* BAR1 (used with if_type 0): port semaphore plus host interrupt
	 * status/mask/clear registers.
	 */
	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_SLIPORT_IF0_SMPHR;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISCR0;
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	/* NOTE(review): bound check is '>' so vf == LPFC_VIR_FUNC_MAX is
	 * accepted - presumably the max is inclusive; confirm.
	 */
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	/* Each virtual function owns one doorbell page in BAR2; offset
	 * every doorbell by the VF's page.
	 */
	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocated memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		/* Undo the descriptor allocation on DMA failure. */
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later. The mailbox dma address is required
	 * to be 16-byte aligned. Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	/* avirt/aphys are the 16-byte-aligned views of the raw buffer;
	 * the over-allocation above guarantees room for the round-up.
	 */
	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now. The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses. The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	/* High word: physical address bits 63:34, tagged as the high half. */
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	/* Low word: physical address bits 33:4, tagged as the low half. */
	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources.
This routine requires 6262 * the caller to ensure all mailbox commands recovered, no 6263 * additional mailbox comands are sent, and interrupts are disabled 6264 * before calling this routine. 6265 * 6266 **/ 6267 static void 6268 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 6269 { 6270 dma_free_coherent(&phba->pcidev->dev, 6271 phba->sli4_hba.bmbx.bmbx_size, 6272 phba->sli4_hba.bmbx.dmabuf->virt, 6273 phba->sli4_hba.bmbx.dmabuf->phys); 6274 6275 kfree(phba->sli4_hba.bmbx.dmabuf); 6276 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 6277 } 6278 6279 /** 6280 * lpfc_sli4_read_config - Get the config parameters. 6281 * @phba: pointer to lpfc hba data structure. 6282 * 6283 * This routine is invoked to read the configuration parameters from the HBA. 6284 * The configuration parameters are used to set the base and maximum values 6285 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 6286 * allocation for the port. 6287 * 6288 * Return codes 6289 * 0 - successful 6290 * -ENOMEM - No available memory 6291 * -EIO - The mailbox failed to complete successfully. 
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint32_t desc_count;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Issue READ_CONFIG by polling; pmb is freed on all exit paths
	 * via read_cfg_out below.
	 */
	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2012 Mailbox failed , mbxCmd x%x "
			"READ_CONFIG, mbxStatus x%x\n",
			bf_get(lpfc_mqe_command, &pmb->u.mqe),
			bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		/* Link data is only valid when the ldv bit is set. */
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		/* Cache every base/count the port reported; these bound all
		 * later resource (XRI/VPI/RPI/VFI/FCFI) and queue allocation.
		 */
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		/* One VPI is consumed by the physical port itself. */
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI(Count:%d)\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}

	if (rc)
		goto read_cfg_out;

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth >
		(phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba)))
		phba->cfg_hba_queue_depth =
			phba->sli4_hba.max_cfg_param.max_xri -
				lpfc_sli4_get_els_iocb_cnt(phba);

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	/* GET_FUNCTION_CONFIG failures are logged but do not fail the
	 * routine; rc still reflects only the READ_CONFIG result.
	 */
	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
	/* NOTE(review): desc_count is read but never used below; the scan
	 * bounds on LPFC_RSRC_DESC_MAX_NUM instead.
	 */
	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	/* Descriptor stride: V0 encodes a reserved length, V1 is explicit. */
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resrouce Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0. This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
6482 **/ 6483 static int 6484 lpfc_setup_endian_order(struct lpfc_hba *phba) 6485 { 6486 LPFC_MBOXQ_t *mboxq; 6487 uint32_t if_type, rc = 0; 6488 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 6489 HOST_ENDIAN_HIGH_WORD1}; 6490 6491 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 6492 switch (if_type) { 6493 case LPFC_SLI_INTF_IF_TYPE_0: 6494 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6495 GFP_KERNEL); 6496 if (!mboxq) { 6497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6498 "0492 Unable to allocate memory for " 6499 "issuing SLI_CONFIG_SPECIAL mailbox " 6500 "command\n"); 6501 return -ENOMEM; 6502 } 6503 6504 /* 6505 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 6506 * two words to contain special data values and no other data. 6507 */ 6508 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 6509 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 6510 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6511 if (rc != MBX_SUCCESS) { 6512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6513 "0493 SLI_CONFIG_SPECIAL mailbox " 6514 "failed with status x%x\n", 6515 rc); 6516 rc = -EIO; 6517 } 6518 mempool_free(mboxq, phba->mbox_mem_pool); 6519 break; 6520 case LPFC_SLI_INTF_IF_TYPE_2: 6521 case LPFC_SLI_INTF_IF_TYPE_1: 6522 default: 6523 break; 6524 } 6525 return rc; 6526 } 6527 6528 /** 6529 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts 6530 * @phba: pointer to lpfc hba data structure. 6531 * 6532 * This routine is invoked to check the user settable queue counts for EQs and 6533 * CQs. after this routine is called the counts will be set to valid values that 6534 * adhere to the constraints of the system's interrupt vectors and the port's 6535 * queue resources. 
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	int cfg_fcp_io_channel;
	uint32_t cpu;
	uint32_t i = 0;


	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on HBA EQ parameters */
	cfg_fcp_io_channel = phba->cfg_fcp_io_channel;

	/* It doesn't make sense to have more io channels than CPUs; count
	 * the online CPUs by walking the mask.
	 */
	for_each_online_cpu(cpu) {
		i++;
	}
	/* NOTE(review): signed/unsigned comparison - i is uint32_t while
	 * cfg_fcp_io_channel is int; fine for sane config values.
	 */
	if (i < cfg_fcp_io_channel) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3188 Reducing IO channels to match number of "
				"CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
		cfg_fcp_io_channel = i;
	}

	/* Clamp to the number of EQs the port actually provides; fail only
	 * when even the minimum cannot be met.
	 */
	if (cfg_fcp_io_channel >
	    phba->sli4_hba.max_cfg_param.max_eq) {
		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
		if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_io_channel);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2575 Not enough EQs (%d) from the pci "
				"function for supporting the requested "
				"FCP EQs (%d), the actual FCP EQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
	}

	/* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */

	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
	phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
	phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
	phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	return 0;
out_error:
	return -ENOMEM;
}

/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx;

	/*
	 * Create HBA Record arrays.
	 */
	if (!phba->cfg_fcp_io_channel)
		return -ERANGE;

	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	/* One EQ/CQ/WQ pointer slot per configured IO channel. */
	phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2576 Failed allocate memory for "
			"fast-path EQ record array\n");
		goto out_error;
	}

	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_error;
	}

	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_error;
	}

	/*
	 * Since the first EQ can have multiple CQs associated with it,
	 * this array is used to quickly see if we have a FCP fast-path
	 * CQ match.
	 */
	phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
					    phba->cfg_fcp_io_channel), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2545 Failed allocate memory for fast-path "
				"CQ map\n");
		goto out_error;
	}

	/*
	 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
	 * how many EQs to create.
	 */
	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {

		/* Create EQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate EQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.hba_eq[idx] = qdesc;

		/* Create Fast Path FCP CQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.fcp_cq[idx] = qdesc;

		/* Create Fast Path FCP WQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.fcp_wq[idx] = qdesc;
	}


	/*
	 * Create Slow Path Completion Queues (CQs)
	 */

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_cq = qdesc;


	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create ELS Work Queues
	 */

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_wq = qdesc;

	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	return 0;

out_error:
	/* Single cleanup path: destroy frees whatever was allocated. */
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int idx;

	/* Every pointer is NULL-checked and cleared after free, so this
	 * routine is safe to call on a partially-created queue set (it is
	 * the error path of lpfc_sli4_queue_create) and safe against being
	 * called twice.
	 */
	if (phba->sli4_hba.hba_eq != NULL) {
		/* Release HBA event queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.hba_eq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.hba_eq[idx]);
				phba->sli4_hba.hba_eq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.hba_eq);
		phba->sli4_hba.hba_eq = NULL;
	}

	if (phba->sli4_hba.fcp_cq != NULL) {
		/* Release FCP completion queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_cq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_cq[idx]);
				phba->sli4_hba.fcp_cq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_cq);
		phba->sli4_hba.fcp_cq = NULL;
	}

	if (phba->sli4_hba.fcp_wq != NULL) {
		/* Release FCP work queue */
		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
			if (phba->sli4_hba.fcp_wq[idx] != NULL) {
				lpfc_sli4_queue_free(
					phba->sli4_hba.fcp_wq[idx]);
				phba->sli4_hba.fcp_wq[idx] = NULL;
			}
		}
		kfree(phba->sli4_hba.fcp_wq);
		phba->sli4_hba.fcp_wq = NULL;
	}

	/* Release FCP CQ mapping array */
	if (phba->sli4_hba.fcp_cq_map != NULL) {
		kfree(phba->sli4_hba.fcp_cq_map);
		phba->sli4_hba.fcp_cq_map = NULL;
	}

	/* Release mailbox command work queue */
	if (phba->sli4_hba.mbx_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
		phba->sli4_hba.mbx_wq = NULL;
	}

	/* Release ELS work queue */
	if (phba->sli4_hba.els_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
		phba->sli4_hba.els_wq = NULL;
	}

	/* Release unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
		phba->sli4_hba.hdr_rq = NULL;
	}
	if (phba->sli4_hba.dat_rq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
		phba->sli4_hba.dat_rq = NULL;
	}

	/* Release ELS complete queue */
	if (phba->sli4_hba.els_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
		phba->sli4_hba.els_cq = NULL;
	}

	/* Release mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
		phba->sli4_hba.mbx_cq = NULL;
	}

	return;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	int rc = -ENOMEM;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int fcp_cq_index = 0;

	/*
	 * Set up HBA Event Queues (EQs)
	 */

	/* Set up HBA event queue */
	if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3147 Fast-path EQs not allocated\n");
		rc = -ENOMEM;
		goto out_error;
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
		if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", fcp_eqidx);
			rc = -ENOMEM;
			goto out_destroy_hba_eq;
		}
		/* Split the interrupt coalescing budget evenly per channel. */
		rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
			 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_hba_eq;
		}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6957 "2584 HBA EQ setup: " 6958 "queue[%d]-id=%d\n", fcp_eqidx, 6959 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); 6960 } 6961 6962 /* Set up fast-path FCP Response Complete Queue */ 6963 if (!phba->sli4_hba.fcp_cq) { 6964 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6965 "3148 Fast-path FCP CQ array not " 6966 "allocated\n"); 6967 rc = -ENOMEM; 6968 goto out_destroy_hba_eq; 6969 } 6970 6971 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { 6972 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6974 "0526 Fast-path FCP CQ (%d) not " 6975 "allocated\n", fcp_cqidx); 6976 rc = -ENOMEM; 6977 goto out_destroy_fcp_cq; 6978 } 6979 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6980 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); 6981 if (rc) { 6982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6983 "0527 Failed setup of fast-path FCP " 6984 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 6985 goto out_destroy_fcp_cq; 6986 } 6987 6988 /* Setup fcp_cq_map for fast lookup */ 6989 phba->sli4_hba.fcp_cq_map[fcp_cqidx] = 6990 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; 6991 6992 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6993 "2588 FCP CQ setup: cq[%d]-id=%d, " 6994 "parent seq[%d]-id=%d\n", 6995 fcp_cqidx, 6996 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6997 fcp_cqidx, 6998 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); 6999 } 7000 7001 /* Set up fast-path FCP Work Queue */ 7002 if (!phba->sli4_hba.fcp_wq) { 7003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7004 "3149 Fast-path FCP WQ array not " 7005 "allocated\n"); 7006 rc = -ENOMEM; 7007 goto out_destroy_fcp_cq; 7008 } 7009 7010 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { 7011 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 7012 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7013 "0534 Fast-path FCP WQ (%d) not " 7014 "allocated\n", fcp_wqidx); 7015 rc = -ENOMEM; 7016 goto out_destroy_fcp_wq; 7017 } 7018 
rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 7019 phba->sli4_hba.fcp_cq[fcp_wqidx], 7020 LPFC_FCP); 7021 if (rc) { 7022 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7023 "0535 Failed setup of fast-path FCP " 7024 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 7025 goto out_destroy_fcp_wq; 7026 } 7027 7028 /* Bind this WQ to the next FCP ring */ 7029 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; 7030 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; 7031 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; 7032 7033 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7034 "2591 FCP WQ setup: wq[%d]-id=%d, " 7035 "parent cq[%d]-id=%d\n", 7036 fcp_wqidx, 7037 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 7038 fcp_cq_index, 7039 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); 7040 } 7041 /* 7042 * Set up Complete Queues (CQs) 7043 */ 7044 7045 /* Set up slow-path MBOX Complete Queue as the first CQ */ 7046 if (!phba->sli4_hba.mbx_cq) { 7047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7048 "0528 Mailbox CQ not allocated\n"); 7049 rc = -ENOMEM; 7050 goto out_destroy_fcp_wq; 7051 } 7052 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, 7053 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); 7054 if (rc) { 7055 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7056 "0529 Failed setup of slow-path mailbox CQ: " 7057 "rc = 0x%x\n", rc); 7058 goto out_destroy_fcp_wq; 7059 } 7060 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7061 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7062 phba->sli4_hba.mbx_cq->queue_id, 7063 phba->sli4_hba.hba_eq[0]->queue_id); 7064 7065 /* Set up slow-path ELS Complete Queue */ 7066 if (!phba->sli4_hba.els_cq) { 7067 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7068 "0530 ELS CQ not allocated\n"); 7069 rc = -ENOMEM; 7070 goto out_destroy_mbx_cq; 7071 } 7072 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, 7073 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); 7074 if (rc) { 7075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7076 "0531 Failed setup of 
slow-path ELS CQ: " 7077 "rc = 0x%x\n", rc); 7078 goto out_destroy_mbx_cq; 7079 } 7080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7081 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7082 phba->sli4_hba.els_cq->queue_id, 7083 phba->sli4_hba.hba_eq[0]->queue_id); 7084 7085 /* 7086 * Set up all the Work Queues (WQs) 7087 */ 7088 7089 /* Set up Mailbox Command Queue */ 7090 if (!phba->sli4_hba.mbx_wq) { 7091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7092 "0538 Slow-path MQ not allocated\n"); 7093 rc = -ENOMEM; 7094 goto out_destroy_els_cq; 7095 } 7096 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7097 phba->sli4_hba.mbx_cq, LPFC_MBOX); 7098 if (rc) { 7099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7100 "0539 Failed setup of slow-path MQ: " 7101 "rc = 0x%x\n", rc); 7102 goto out_destroy_els_cq; 7103 } 7104 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7105 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 7106 phba->sli4_hba.mbx_wq->queue_id, 7107 phba->sli4_hba.mbx_cq->queue_id); 7108 7109 /* Set up slow-path ELS Work Queue */ 7110 if (!phba->sli4_hba.els_wq) { 7111 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7112 "0536 Slow-path ELS WQ not allocated\n"); 7113 rc = -ENOMEM; 7114 goto out_destroy_mbx_wq; 7115 } 7116 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 7117 phba->sli4_hba.els_cq, LPFC_ELS); 7118 if (rc) { 7119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7120 "0537 Failed setup of slow-path ELS WQ: " 7121 "rc = 0x%x\n", rc); 7122 goto out_destroy_mbx_wq; 7123 } 7124 7125 /* Bind this WQ to the ELS ring */ 7126 pring = &psli->ring[LPFC_ELS_RING]; 7127 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; 7128 phba->sli4_hba.els_cq->pring = pring; 7129 7130 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7131 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 7132 phba->sli4_hba.els_wq->queue_id, 7133 phba->sli4_hba.els_cq->queue_id); 7134 7135 /* 7136 * Create Receive Queue (RQ) 7137 */ 7138 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 7139 
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7140 "0540 Receive Queue not allocated\n"); 7141 rc = -ENOMEM; 7142 goto out_destroy_els_wq; 7143 } 7144 7145 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 7146 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); 7147 7148 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 7149 phba->sli4_hba.els_cq, LPFC_USOL); 7150 if (rc) { 7151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7152 "0541 Failed setup of Receive Queue: " 7153 "rc = 0x%x\n", rc); 7154 goto out_destroy_fcp_wq; 7155 } 7156 7157 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7158 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 7159 "parent cq-id=%d\n", 7160 phba->sli4_hba.hdr_rq->queue_id, 7161 phba->sli4_hba.dat_rq->queue_id, 7162 phba->sli4_hba.els_cq->queue_id); 7163 return 0; 7164 7165 out_destroy_els_wq: 7166 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7167 out_destroy_mbx_wq: 7168 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7169 out_destroy_els_cq: 7170 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7171 out_destroy_mbx_cq: 7172 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7173 out_destroy_fcp_wq: 7174 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 7175 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 7176 out_destroy_fcp_cq: 7177 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 7178 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 7179 out_destroy_hba_eq: 7180 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 7181 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]); 7182 out_error: 7183 return rc; 7184 } 7185 7186 /** 7187 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 7188 * @phba: pointer to lpfc hba data structure. 7189 * 7190 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 7191 * operation. 7192 * 7193 * Return codes 7194 * 0 - successful 7195 * -ENOMEM - No available memory 7196 * -EIO - The mailbox failed to complete successfully. 
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Unset mailbox command work queue */
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
	/* Unset ELS work queue */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	/* Unset unsolicited receive queue (header/data RQ pair) */
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	/* Unset FCP work queues, one per configured FCP I/O channel */
	if (phba->sli4_hba.fcp_wq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
	}
	/* Unset mailbox command complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
	/* Unset ELS complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	/* Unset FCP response complete queues */
	if (phba->sli4_hba.fcp_cq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
	}
	/*
	 * Unset fast-path event queues last: WQs/RQs feed CQs and CQs feed
	 * EQs, so teardown proceeds child-first, mirroring setup order.
	 */
	if (phba->sli4_hba.hba_eq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
		     fcp_qidx++)
			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
	}
}

/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * CQE. For now, this pool is used for the interrupt service routine to queue
 * the following HBA completion queue events for the worker thread to process:
 * - Mailbox asynchronous events
 * - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
7244 * 7245 * Return codes 7246 * 0 - successful 7247 * -ENOMEM - No available memory 7248 **/ 7249 static int 7250 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 7251 { 7252 struct lpfc_cq_event *cq_event; 7253 int i; 7254 7255 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 7256 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 7257 if (!cq_event) 7258 goto out_pool_create_fail; 7259 list_add_tail(&cq_event->list, 7260 &phba->sli4_hba.sp_cqe_event_pool); 7261 } 7262 return 0; 7263 7264 out_pool_create_fail: 7265 lpfc_sli4_cq_event_pool_destroy(phba); 7266 return -ENOMEM; 7267 } 7268 7269 /** 7270 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 7271 * @phba: pointer to lpfc hba data structure. 7272 * 7273 * This routine is invoked to free the pool of completion queue events at 7274 * driver unload time. Note that, it is the responsibility of the driver 7275 * cleanup routine to free all the outstanding completion-queue events 7276 * allocated from this pool back into the pool before invoking this routine 7277 * to destroy the pool. 7278 **/ 7279 static void 7280 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 7281 { 7282 struct lpfc_cq_event *cq_event, *next_cq_event; 7283 7284 list_for_each_entry_safe(cq_event, next_cq_event, 7285 &phba->sli4_hba.sp_cqe_event_pool, list) { 7286 list_del(&cq_event->list); 7287 kfree(cq_event); 7288 } 7289 } 7290 7291 /** 7292 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 7293 * @phba: pointer to lpfc hba data structure. 7294 * 7295 * This routine is the lock free version of the API invoked to allocate a 7296 * completion-queue event from the free pool. 7297 * 7298 * Return: Pointer to the newly allocated completion-queue event if successful 7299 * NULL otherwise. 
7300 **/ 7301 struct lpfc_cq_event * 7302 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 7303 { 7304 struct lpfc_cq_event *cq_event = NULL; 7305 7306 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 7307 struct lpfc_cq_event, list); 7308 return cq_event; 7309 } 7310 7311 /** 7312 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 7313 * @phba: pointer to lpfc hba data structure. 7314 * 7315 * This routine is the lock version of the API invoked to allocate a 7316 * completion-queue event from the free pool. 7317 * 7318 * Return: Pointer to the newly allocated completion-queue event if successful 7319 * NULL otherwise. 7320 **/ 7321 struct lpfc_cq_event * 7322 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 7323 { 7324 struct lpfc_cq_event *cq_event; 7325 unsigned long iflags; 7326 7327 spin_lock_irqsave(&phba->hbalock, iflags); 7328 cq_event = __lpfc_sli4_cq_event_alloc(phba); 7329 spin_unlock_irqrestore(&phba->hbalock, iflags); 7330 return cq_event; 7331 } 7332 7333 /** 7334 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 7335 * @phba: pointer to lpfc hba data structure. 7336 * @cq_event: pointer to the completion queue event to be freed. 7337 * 7338 * This routine is the lock free version of the API invoked to release a 7339 * completion-queue event back into the free pool. 7340 **/ 7341 void 7342 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 7343 struct lpfc_cq_event *cq_event) 7344 { 7345 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 7346 } 7347 7348 /** 7349 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 7350 * @phba: pointer to lpfc hba data structure. 7351 * @cq_event: pointer to the completion queue event to be freed. 7352 * 7353 * This routine is the lock version of the API invoked to release a 7354 * completion-queue event back into the free pool. 
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free all the pending completion-queue events
 * back into the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/*
	 * Release outside the lock: lpfc_sli4_cq_event_release() takes
	 * hbalock itself for each individual event.
	 */
	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It will destroy
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
7410 **/ 7411 int 7412 lpfc_pci_function_reset(struct lpfc_hba *phba) 7413 { 7414 LPFC_MBOXQ_t *mboxq; 7415 uint32_t rc = 0, if_type; 7416 uint32_t shdr_status, shdr_add_status; 7417 uint32_t rdy_chk, num_resets = 0, reset_again = 0; 7418 union lpfc_sli4_cfg_shdr *shdr; 7419 struct lpfc_register reg_data; 7420 uint16_t devid; 7421 7422 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7423 switch (if_type) { 7424 case LPFC_SLI_INTF_IF_TYPE_0: 7425 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 7426 GFP_KERNEL); 7427 if (!mboxq) { 7428 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7429 "0494 Unable to allocate memory for " 7430 "issuing SLI_FUNCTION_RESET mailbox " 7431 "command\n"); 7432 return -ENOMEM; 7433 } 7434 7435 /* Setup PCI function reset mailbox-ioctl command */ 7436 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7437 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 7438 LPFC_SLI4_MBX_EMBED); 7439 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7440 shdr = (union lpfc_sli4_cfg_shdr *) 7441 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7442 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7443 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 7444 &shdr->response); 7445 if (rc != MBX_TIMEOUT) 7446 mempool_free(mboxq, phba->mbox_mem_pool); 7447 if (shdr_status || shdr_add_status || rc) { 7448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7449 "0495 SLI_FUNCTION_RESET mailbox " 7450 "failed with status x%x add_status x%x," 7451 " mbx status x%x\n", 7452 shdr_status, shdr_add_status, rc); 7453 rc = -ENXIO; 7454 } 7455 break; 7456 case LPFC_SLI_INTF_IF_TYPE_2: 7457 for (num_resets = 0; 7458 num_resets < MAX_IF_TYPE_2_RESETS; 7459 num_resets++) { 7460 reg_data.word0 = 0; 7461 bf_set(lpfc_sliport_ctrl_end, ®_data, 7462 LPFC_SLIPORT_LITTLE_ENDIAN); 7463 bf_set(lpfc_sliport_ctrl_ip, ®_data, 7464 LPFC_SLIPORT_INIT_PORT); 7465 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 
7466 CTRLregaddr); 7467 /* flush */ 7468 pci_read_config_word(phba->pcidev, 7469 PCI_DEVICE_ID, &devid); 7470 /* 7471 * Poll the Port Status Register and wait for RDY for 7472 * up to 10 seconds. If the port doesn't respond, treat 7473 * it as an error. If the port responds with RN, start 7474 * the loop again. 7475 */ 7476 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { 7477 msleep(10); 7478 if (lpfc_readl(phba->sli4_hba.u.if_type2. 7479 STATUSregaddr, ®_data.word0)) { 7480 rc = -ENODEV; 7481 goto out; 7482 } 7483 if (bf_get(lpfc_sliport_status_rn, ®_data)) 7484 reset_again++; 7485 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 7486 break; 7487 } 7488 7489 /* 7490 * If the port responds to the init request with 7491 * reset needed, delay for a bit and restart the loop. 7492 */ 7493 if (reset_again && (rdy_chk < 1000)) { 7494 msleep(10); 7495 reset_again = 0; 7496 continue; 7497 } 7498 7499 /* Detect any port errors. */ 7500 if ((bf_get(lpfc_sliport_status_err, ®_data)) || 7501 (rdy_chk >= 1000)) { 7502 phba->work_status[0] = readl( 7503 phba->sli4_hba.u.if_type2.ERR1regaddr); 7504 phba->work_status[1] = readl( 7505 phba->sli4_hba.u.if_type2.ERR2regaddr); 7506 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7507 "2890 Port error detected during port " 7508 "reset(%d): wait_tmo:%d ms, " 7509 "port status reg 0x%x, " 7510 "error 1=0x%x, error 2=0x%x\n", 7511 num_resets, rdy_chk*10, 7512 reg_data.word0, 7513 phba->work_status[0], 7514 phba->work_status[1]); 7515 rc = -ENODEV; 7516 } 7517 7518 /* 7519 * Terminate the outer loop provided the Port indicated 7520 * ready within 10 seconds. 7521 */ 7522 if (rdy_chk < 1000) 7523 break; 7524 } 7525 /* delay driver action following IF_TYPE_2 function reset */ 7526 msleep(100); 7527 break; 7528 case LPFC_SLI_INTF_IF_TYPE_1: 7529 default: 7530 break; 7531 } 7532 7533 out: 7534 /* Catch the not-ready port failure after a port reset. 
*/ 7535 if (num_resets >= MAX_IF_TYPE_2_RESETS) 7536 rc = -ENODEV; 7537 7538 return rc; 7539 } 7540 7541 /** 7542 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 7543 * @phba: pointer to lpfc hba data structure. 7544 * @cnt: number of nop mailbox commands to send. 7545 * 7546 * This routine is invoked to send a number @cnt of NOP mailbox command and 7547 * wait for each command to complete. 7548 * 7549 * Return: the number of NOP mailbox command completed. 7550 **/ 7551 static int 7552 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 7553 { 7554 LPFC_MBOXQ_t *mboxq; 7555 int length, cmdsent; 7556 uint32_t mbox_tmo; 7557 uint32_t rc = 0; 7558 uint32_t shdr_status, shdr_add_status; 7559 union lpfc_sli4_cfg_shdr *shdr; 7560 7561 if (cnt == 0) { 7562 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7563 "2518 Requested to send 0 NOP mailbox cmd\n"); 7564 return cnt; 7565 } 7566 7567 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7568 if (!mboxq) { 7569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7570 "2519 Unable to allocate memory for issuing " 7571 "NOP mailbox command\n"); 7572 return 0; 7573 } 7574 7575 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 7576 length = (sizeof(struct lpfc_mbx_nop) - 7577 sizeof(struct lpfc_sli4_cfg_mhdr)); 7578 7579 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 7580 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7581 LPFC_MBOX_OPCODE_NOP, length, 7582 LPFC_SLI4_MBX_EMBED); 7583 if (!phba->sli4_hba.intr_enable) 7584 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7585 else { 7586 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 7587 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 7588 } 7589 if (rc == MBX_TIMEOUT) 7590 break; 7591 /* Check return status */ 7592 shdr = (union lpfc_sli4_cfg_shdr *) 7593 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7594 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7595 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 
7596 &shdr->response); 7597 if (shdr_status || shdr_add_status || rc) { 7598 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7599 "2520 NOP mailbox command failed " 7600 "status x%x add_status x%x mbx " 7601 "status x%x\n", shdr_status, 7602 shdr_add_status, rc); 7603 break; 7604 } 7605 } 7606 7607 if (rc != MBX_TIMEOUT) 7608 mempool_free(mboxq, phba->mbox_mem_pool); 7609 7610 return cmdsent; 7611 } 7612 7613 /** 7614 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 7615 * @phba: pointer to lpfc hba data structure. 7616 * 7617 * This routine is invoked to set up the PCI device memory space for device 7618 * with SLI-4 interface spec. 7619 * 7620 * Return codes 7621 * 0 - successful 7622 * other values - error 7623 **/ 7624 static int 7625 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 7626 { 7627 struct pci_dev *pdev; 7628 unsigned long bar0map_len, bar1map_len, bar2map_len; 7629 int error = -ENODEV; 7630 uint32_t if_type; 7631 7632 /* Obtain PCI device reference */ 7633 if (!phba->pcidev) 7634 return error; 7635 else 7636 pdev = phba->pcidev; 7637 7638 /* Set the device DMA mask size */ 7639 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 7640 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 7641 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 7642 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 7643 return error; 7644 } 7645 } 7646 7647 /* 7648 * The BARs and register set definitions and offset locations are 7649 * dependent on the if_type. 7650 */ 7651 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 7652 &phba->sli4_hba.sli_intf.word0)) { 7653 return error; 7654 } 7655 7656 /* There is no SLI3 failback for SLI4 devices. 
*/ 7657 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 7658 LPFC_SLI_INTF_VALID) { 7659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7660 "2894 SLI_INTF reg contents invalid " 7661 "sli_intf reg 0x%x\n", 7662 phba->sli4_hba.sli_intf.word0); 7663 return error; 7664 } 7665 7666 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7667 /* 7668 * Get the bus address of SLI4 device Bar regions and the 7669 * number of bytes required by each mapping. The mapping of the 7670 * particular PCI BARs regions is dependent on the type of 7671 * SLI4 device. 7672 */ 7673 if (pci_resource_start(pdev, 0)) { 7674 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7675 bar0map_len = pci_resource_len(pdev, 0); 7676 7677 /* 7678 * Map SLI4 PCI Config Space Register base to a kernel virtual 7679 * addr 7680 */ 7681 phba->sli4_hba.conf_regs_memmap_p = 7682 ioremap(phba->pci_bar0_map, bar0map_len); 7683 if (!phba->sli4_hba.conf_regs_memmap_p) { 7684 dev_printk(KERN_ERR, &pdev->dev, 7685 "ioremap failed for SLI4 PCI config " 7686 "registers.\n"); 7687 goto out; 7688 } 7689 /* Set up BAR0 PCI config space register memory map */ 7690 lpfc_sli4_bar0_register_memmap(phba, if_type); 7691 } else { 7692 phba->pci_bar0_map = pci_resource_start(pdev, 1); 7693 bar0map_len = pci_resource_len(pdev, 1); 7694 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 7695 dev_printk(KERN_ERR, &pdev->dev, 7696 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 7697 goto out; 7698 } 7699 phba->sli4_hba.conf_regs_memmap_p = 7700 ioremap(phba->pci_bar0_map, bar0map_len); 7701 if (!phba->sli4_hba.conf_regs_memmap_p) { 7702 dev_printk(KERN_ERR, &pdev->dev, 7703 "ioremap failed for SLI4 PCI config " 7704 "registers.\n"); 7705 goto out; 7706 } 7707 lpfc_sli4_bar0_register_memmap(phba, if_type); 7708 } 7709 7710 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 7711 (pci_resource_start(pdev, 2))) { 7712 /* 7713 * Map SLI4 if type 0 HBA Control Register base to a kernel 7714 * virtual address and setup the 
registers. 7715 */ 7716 phba->pci_bar1_map = pci_resource_start(pdev, 2); 7717 bar1map_len = pci_resource_len(pdev, 2); 7718 phba->sli4_hba.ctrl_regs_memmap_p = 7719 ioremap(phba->pci_bar1_map, bar1map_len); 7720 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 7721 dev_printk(KERN_ERR, &pdev->dev, 7722 "ioremap failed for SLI4 HBA control registers.\n"); 7723 goto out_iounmap_conf; 7724 } 7725 lpfc_sli4_bar1_register_memmap(phba); 7726 } 7727 7728 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 7729 (pci_resource_start(pdev, 4))) { 7730 /* 7731 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 7732 * virtual address and setup the registers. 7733 */ 7734 phba->pci_bar2_map = pci_resource_start(pdev, 4); 7735 bar2map_len = pci_resource_len(pdev, 4); 7736 phba->sli4_hba.drbl_regs_memmap_p = 7737 ioremap(phba->pci_bar2_map, bar2map_len); 7738 if (!phba->sli4_hba.drbl_regs_memmap_p) { 7739 dev_printk(KERN_ERR, &pdev->dev, 7740 "ioremap failed for SLI4 HBA doorbell registers.\n"); 7741 goto out_iounmap_ctrl; 7742 } 7743 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 7744 if (error) 7745 goto out_iounmap_all; 7746 } 7747 7748 return 0; 7749 7750 out_iounmap_all: 7751 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7752 out_iounmap_ctrl: 7753 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7754 out_iounmap_conf: 7755 iounmap(phba->sli4_hba.conf_regs_memmap_p); 7756 out: 7757 return error; 7758 } 7759 7760 /** 7761 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 7762 * @phba: pointer to lpfc hba data structure. 7763 * 7764 * This routine is invoked to unset the PCI device memory space for device 7765 * with SLI-4 interface spec. 
7766 **/ 7767 static void 7768 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 7769 { 7770 uint32_t if_type; 7771 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7772 7773 switch (if_type) { 7774 case LPFC_SLI_INTF_IF_TYPE_0: 7775 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7776 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7777 iounmap(phba->sli4_hba.conf_regs_memmap_p); 7778 break; 7779 case LPFC_SLI_INTF_IF_TYPE_2: 7780 iounmap(phba->sli4_hba.conf_regs_memmap_p); 7781 break; 7782 case LPFC_SLI_INTF_IF_TYPE_1: 7783 default: 7784 dev_printk(KERN_ERR, &phba->pcidev->dev, 7785 "FATAL - unsupported SLI4 interface type - %d\n", 7786 if_type); 7787 break; 7788 } 7789 } 7790 7791 /** 7792 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 7793 * @phba: pointer to lpfc hba data structure. 7794 * 7795 * This routine is invoked to enable the MSI-X interrupt vectors to device 7796 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 7797 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 7798 * invoked, enables either all or nothing, depending on the current 7799 * availability of PCI vector resources. The device driver is responsible 7800 * for calling the individual request_irq() to register each MSI-X vector 7801 * with a interrupt handler, which is done in this function. Note that 7802 * later when device is unloading, the driver should always call free_irq() 7803 * on all MSI-X vectors it has done request_irq() on before calling 7804 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 7805 * will be left with MSI-X enabled and leaks its vectors. 
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc, i;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;

	/* Configure MSI-X capability structure */
	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
			     ARRAY_SIZE(phba->msix_entries));
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0477 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", i,
				phba->msix_entries[i].vector,
				phba->msix_entries[i].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(phba->msix_entries[0].vector,
			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(phba->msix_entries[1].vector,
			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);

	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

	/*
	 * The error labels below deliberately fall through: each one undoes
	 * one earlier acquisition, so entering at any label unwinds all
	 * steps taken before the failure.
	 */
mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested (fast-path, vector-1) */
	free_irq(phba->msix_entries[1].vector, phba);

irq_fail_out:
	/* free the irq already requested (slow-path, vector-0) */
	free_irq(phba->msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-3 interface spec.
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors before disabling MSI-X */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector.
The device driver is responsible for calling the 7936 * request_irq() to register MSI vector with a interrupt the handler, which 7937 * is done in this function. 7938 * 7939 * Return codes 7940 * 0 - successful 7941 * other values - error 7942 */ 7943 static int 7944 lpfc_sli_enable_msi(struct lpfc_hba *phba) 7945 { 7946 int rc; 7947 7948 rc = pci_enable_msi(phba->pcidev); 7949 if (!rc) 7950 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7951 "0462 PCI enable MSI mode success.\n"); 7952 else { 7953 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7954 "0471 PCI enable MSI mode failed (%d)\n", rc); 7955 return rc; 7956 } 7957 7958 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 7959 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7960 if (rc) { 7961 pci_disable_msi(phba->pcidev); 7962 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7963 "0478 MSI request_irq failed (%d)\n", rc); 7964 } 7965 return rc; 7966 } 7967 7968 /** 7969 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 7970 * @phba: pointer to lpfc hba data structure. 7971 * 7972 * This routine is invoked to disable the MSI interrupt mode to device with 7973 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has 7974 * done request_irq() on before calling pci_disable_msi(). Failure to do so 7975 * results in a BUG_ON() and a device will be left with MSI enabled and leaks 7976 * its vector. 7977 */ 7978 static void 7979 lpfc_sli_disable_msi(struct lpfc_hba *phba) 7980 { 7981 free_irq(phba->pcidev->irq, phba); 7982 pci_disable_msi(phba->pcidev); 7983 return; 7984 } 7985 7986 /** 7987 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 7988 * @phba: pointer to lpfc hba data structure. 7989 * 7990 * This routine is invoked to enable device interrupt and associate driver's 7991 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 7992 * spec. 
Depends on the interrupt mode configured to the driver, the driver 7993 * will try to fallback from the configured interrupt mode to an interrupt 7994 * mode which is supported by the platform, kernel, and device in the order 7995 * of: 7996 * MSI-X -> MSI -> IRQ. 7997 * 7998 * Return codes 7999 * 0 - successful 8000 * other values - error 8001 **/ 8002 static uint32_t 8003 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 8004 { 8005 uint32_t intr_mode = LPFC_INTR_ERROR; 8006 int retval; 8007 8008 if (cfg_mode == 2) { 8009 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 8010 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 8011 if (!retval) { 8012 /* Now, try to enable MSI-X interrupt mode */ 8013 retval = lpfc_sli_enable_msix(phba); 8014 if (!retval) { 8015 /* Indicate initialization to MSI-X mode */ 8016 phba->intr_type = MSIX; 8017 intr_mode = 2; 8018 } 8019 } 8020 } 8021 8022 /* Fallback to MSI if MSI-X initialization failed */ 8023 if (cfg_mode >= 1 && phba->intr_type == NONE) { 8024 retval = lpfc_sli_enable_msi(phba); 8025 if (!retval) { 8026 /* Indicate initialization to MSI mode */ 8027 phba->intr_type = MSI; 8028 intr_mode = 1; 8029 } 8030 } 8031 8032 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 8033 if (phba->intr_type == NONE) { 8034 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 8035 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 8036 if (!retval) { 8037 /* Indicate initialization to INTx mode */ 8038 phba->intr_type = INTx; 8039 intr_mode = 0; 8040 } 8041 } 8042 return intr_mode; 8043 } 8044 8045 /** 8046 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 8047 * @phba: pointer to lpfc hba data structure. 8048 * 8049 * This routine is invoked to disable device interrupt and disassociate the 8050 * driver's interrupt handler(s) from interrupt vector(s) to device with 8051 * SLI-3 interface spec. 
Depending on the interrupt mode, the driver will 8052 * release the interrupt vector(s) for the message signaled interrupt. 8053 **/ 8054 static void 8055 lpfc_sli_disable_intr(struct lpfc_hba *phba) 8056 { 8057 /* Disable the currently initialized interrupt mode */ 8058 if (phba->intr_type == MSIX) 8059 lpfc_sli_disable_msix(phba); 8060 else if (phba->intr_type == MSI) 8061 lpfc_sli_disable_msi(phba); 8062 else if (phba->intr_type == INTx) 8063 free_irq(phba->pcidev->irq, phba); 8064 8065 /* Reset interrupt management states */ 8066 phba->intr_type = NONE; 8067 phba->sli.slistat.sli_intr = 0; 8068 8069 return; 8070 } 8071 8072 /** 8073 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 8074 * @phba: pointer to lpfc hba data structure. 8075 * 8076 * This routine is invoked to enable the MSI-X interrupt vectors to device 8077 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 8078 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 8079 * enables either all or nothing, depending on the current availability of 8080 * PCI vector resources. The device driver is responsible for calling the 8081 * individual request_irq() to register each MSI-X vector with a interrupt 8082 * handler, which is done in this function. Note that later when device is 8083 * unloading, the driver should always call free_irq() on all MSI-X vectors 8084 * it has done request_irq() on before calling pci_disable_msix(). Failure 8085 * to do so results in a BUG_ON() and a device will be left with MSI-X 8086 * enabled and leaks its vectors. 
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;

	/* Set up MSI-X multi-message vectors */
	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
		phba->sli4_hba.msix_entries[index].entry = index;

	/* Configure MSI-X capability structure */
	vectors = phba->sli4_hba.cfg_eqn;
enable_msix_vectors:
	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
			     vectors);
	/* A positive return from pci_enable_msix() is the number of
	 * vectors actually available; retry with that reduced count.
	 */
	if (rc > 1) {
		vectors = rc;
		goto enable_msix_vectors;
	} else if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}

	/* Log MSI-X vector assignment */
	for (index = 0; index < vectors; index++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0489 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", index,
				phba->sli4_hba.msix_entries[index].vector,
				phba->sli4_hba.msix_entries[index].entry);

	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */
	for (index = 0; index < vectors; index++) {
		/* Build a distinct handler name per vector for /proc. */
		memset(&phba->sli4_hba.handler_name[index], 0, 16);
		sprintf((char *)&phba->sli4_hba.handler_name[index],
			LPFC_DRIVER_HANDLER_NAME"%d", index);

		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
		atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
				 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
				 (char *)&phba->sli4_hba.handler_name[index],
				 &phba->sli4_hba.fcp_eq_hdl[index]);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}
	}
	phba->sli4_hba.msix_vec_nr = vectors;

	return rc;

cfg_fail_out:
	/* free the irqs already requested (vectors before the failing one) */
	for (--index; index >= 0; index--)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_disable_msix(struct lpfc_hba *phba)
{
	int index;

	/* Free up MSI-X multi-message vectors before disabling MSI-X */
	for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);

	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	/* Put the device into MSI mode before hooking up a handler. */
	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		/* Undo MSI enable; no handler got registered. */
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	/* All FCP I/O channels share the single MSI vector/handle context. */
	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}

	return 0;
}

/**
 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
 * its vector.
 **/
static void
lpfc_sli4_disable_msi(struct lpfc_hba *phba)
{
	/* Order matters: release the vector before turning MSI off. */
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured to the driver,
 * the driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device in
 * the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd: nothing needed on
		 * SLI-4, so retval is seeded to success (contrast with the
		 * SLI-3 path, which issues a conf_port mailbox here).
		 */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			/* All FCP channels share the one INTx line. */
			for (index = 0; index < phba->cfg_fcp_io_channel;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
					   fcp_eq_in_use, 1);
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
8317 * 8318 * This routine is invoked to disable device interrupt and disassociate 8319 * the driver's interrupt handler(s) from interrupt vector(s) to device 8320 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 8321 * will release the interrupt vector(s) for the message signaled interrupt. 8322 **/ 8323 static void 8324 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 8325 { 8326 /* Disable the currently initialized interrupt mode */ 8327 if (phba->intr_type == MSIX) 8328 lpfc_sli4_disable_msix(phba); 8329 else if (phba->intr_type == MSI) 8330 lpfc_sli4_disable_msi(phba); 8331 else if (phba->intr_type == INTx) 8332 free_irq(phba->pcidev->irq, phba); 8333 8334 /* Reset interrupt management states */ 8335 phba->intr_type = NONE; 8336 phba->sli.slistat.sli_intr = 0; 8337 8338 return; 8339 } 8340 8341 /** 8342 * lpfc_unset_hba - Unset SLI3 hba device initialization 8343 * @phba: pointer to lpfc hba data structure. 8344 * 8345 * This routine is invoked to unset the HBA device initialization steps to 8346 * a device with SLI-3 interface spec. 8347 **/ 8348 static void 8349 lpfc_unset_hba(struct lpfc_hba *phba) 8350 { 8351 struct lpfc_vport *vport = phba->pport; 8352 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8353 8354 spin_lock_irq(shost->host_lock); 8355 vport->load_flag |= FC_UNLOADING; 8356 spin_unlock_irq(shost->host_lock); 8357 8358 kfree(phba->vpi_bmask); 8359 kfree(phba->vpi_ids); 8360 8361 lpfc_stop_hba_timers(phba); 8362 8363 phba->pport->work_port_events = 0; 8364 8365 lpfc_sli_hba_down(phba); 8366 8367 lpfc_sli_brdrestart(phba); 8368 8369 lpfc_sli_disable_intr(phba); 8370 8371 return; 8372 } 8373 8374 /** 8375 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. 8376 * @phba: pointer to lpfc hba data structure. 8377 * 8378 * This routine is invoked to unset the HBA device initialization steps to 8379 * a device with SLI-4 interface spec. 
8380 **/ 8381 static void 8382 lpfc_sli4_unset_hba(struct lpfc_hba *phba) 8383 { 8384 struct lpfc_vport *vport = phba->pport; 8385 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8386 8387 spin_lock_irq(shost->host_lock); 8388 vport->load_flag |= FC_UNLOADING; 8389 spin_unlock_irq(shost->host_lock); 8390 8391 phba->pport->work_port_events = 0; 8392 8393 /* Stop the SLI4 device port */ 8394 lpfc_stop_port(phba); 8395 8396 lpfc_sli4_disable_intr(phba); 8397 8398 /* Reset SLI4 HBA FCoE function */ 8399 lpfc_pci_function_reset(phba); 8400 lpfc_sli4_queue_destroy(phba); 8401 8402 return; 8403 } 8404 8405 /** 8406 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 8407 * @phba: Pointer to HBA context object. 8408 * 8409 * This function is called in the SLI4 code path to wait for completion 8410 * of device's XRIs exchange busy. It will check the XRI exchange busy 8411 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 8412 * that, it will check the XRI exchange busy on outstanding FCP and ELS 8413 * I/Os every 30 seconds, log error message, and wait forever. Only when 8414 * all XRI exchange busy complete, the driver unload shall proceed with 8415 * invoking the function reset ioctl mailbox command to the CNA and the 8416 * the rest of the driver unload resource release. 
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	int wait_time = 0;
	/* Non-empty abort lists mean XRIs are still exchange-busy. */
	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	while (!fcp_xri_cmpl || !els_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			/* Past the initial window: log and poll slowly,
			 * waiting forever if necessary.
			 */
			if (!fcp_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2877 FCP XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
		fcp_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion.
 * The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	/* Poll when interrupts are not yet enabled (early init path). */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	/* Unpack the mailbox response into the driver's parameter cache. */
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt,
					  &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}

/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings. The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	/* Poll when interrupts are not yet enabled (early init path). */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	/* Unpack the mailbox response into the driver's parameter cache. */
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	/* Per-host work queue support is a port capability bit. */
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return 0;
}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it can
 * support this kind of device. If the match is successful, the driver core
 * invokes this routine. If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device.
	 * The loop retries with progressively simpler interrupt modes
	 * (MSI-X -> MSI -> INTx) until one passes the active-interrupt test.
	 */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Mark the port unloading before tearing anything down. */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* The physical port itself is torn down below. */
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disable all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	/* Release the VPI tracking arrays. */
	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	pci_set_drvdata(pdev, NULL);
	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec.
When 8947 * PM invokes this method, it quiesces the device by stopping the driver's 8948 * worker thread for the device, turning off device's interrupt and DMA, 8949 * and bring the device offline. Note that as the driver implements the 8950 * minimum PM requirements to a power-aware driver's PM support for the 8951 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 8952 * to the suspend() method call will be treated as SUSPEND and the driver will 8953 * fully reinitialize its device during resume() method call, the driver will 8954 * set device to PCI_D3hot state in PCI config space instead of setting it 8955 * according to the @msg provided by the PM. 8956 * 8957 * Return code 8958 * 0 - driver suspended the device 8959 * Error otherwise 8960 **/ 8961 static int 8962 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 8963 { 8964 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8965 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8966 8967 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8968 "0473 PCI device Power Management suspend.\n"); 8969 8970 /* Bring down the device */ 8971 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 8972 lpfc_offline(phba); 8973 kthread_stop(phba->worker_thread); 8974 8975 /* Disable interrupt from device */ 8976 lpfc_sli_disable_intr(phba); 8977 8978 /* Save device state to PCI config space */ 8979 pci_save_state(pdev); 8980 pci_set_power_state(pdev, PCI_D3hot); 8981 8982 return 0; 8983 } 8984 8985 /** 8986 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 8987 * @pdev: pointer to PCI device 8988 * 8989 * This routine is to be called from the kernel's PCI subsystem to support 8990 * system Power Management (PM) to device with SLI-3 interface spec. When PM 8991 * invokes this method, it restores the device's PCI config space state and 8992 * fully reinitializes the device and brings it online. 
Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt, retrying the last active mode */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer to retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset.
It 9087 * disables the device interrupt and pci device, and aborts the internal FCP 9088 * pending I/Os. 9089 **/ 9090 static void 9091 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 9092 { 9093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9094 "2710 PCI channel disable preparing for reset\n"); 9095 9096 /* Block any management I/Os to the device */ 9097 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 9098 9099 /* Block all SCSI devices' I/Os on the host */ 9100 lpfc_scsi_dev_block(phba); 9101 9102 /* stop all timers */ 9103 lpfc_stop_hba_timers(phba); 9104 9105 /* Disable interrupt and pci device */ 9106 lpfc_sli_disable_intr(phba); 9107 pci_disable_device(phba->pcidev); 9108 9109 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 9110 lpfc_sli_flush_fcp_rings(phba); 9111 } 9112 9113 /** 9114 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 9115 * @phba: pointer to lpfc hba data structure. 9116 * 9117 * This routine is called to prepare the SLI3 device for PCI slot permanently 9118 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 9119 * pending I/Os. 9120 **/ 9121 static void 9122 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 9123 { 9124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9125 "2711 PCI channel permanent disable for failure\n"); 9126 /* Block all SCSI devices' I/Os on the host */ 9127 lpfc_scsi_dev_block(phba); 9128 9129 /* stop all timers */ 9130 lpfc_stop_hba_timers(phba); 9131 9132 /* Clean up all driver's outstanding SCSI I/Os */ 9133 lpfc_sli_flush_fcp_rings(phba); 9134 } 9135 9136 /** 9137 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 9138 * @pdev: pointer to PCI device. 9139 * @state: the current PCI connection state. 9140 * 9141 * This routine is called from the PCI subsystem for I/O error handling to 9142 * device with SLI-3 interface spec. 
This function is called by the PCI 9143 * subsystem after a PCI bus error affecting this device has been detected. 9144 * When this function is invoked, it will need to stop all the I/Os and 9145 * interrupt(s) to the device. Once that is done, it will return 9146 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 9147 * as desired. 9148 * 9149 * Return codes 9150 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 9151 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9152 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9153 **/ 9154 static pci_ers_result_t 9155 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 9156 { 9157 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9158 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9159 9160 switch (state) { 9161 case pci_channel_io_normal: 9162 /* Non-fatal error, prepare for recovery */ 9163 lpfc_sli_prep_dev_for_recover(phba); 9164 return PCI_ERS_RESULT_CAN_RECOVER; 9165 case pci_channel_io_frozen: 9166 /* Fatal error, prepare for slot reset */ 9167 lpfc_sli_prep_dev_for_reset(phba); 9168 return PCI_ERS_RESULT_NEED_RESET; 9169 case pci_channel_io_perm_failure: 9170 /* Permanent failure, prepare for device down */ 9171 lpfc_sli_prep_dev_for_perm_failure(phba); 9172 return PCI_ERS_RESULT_DISCONNECT; 9173 default: 9174 /* Unknown state, prepare and request slot reset */ 9175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9176 "0472 Unknown PCI error state: x%x\n", state); 9177 lpfc_sli_prep_dev_for_reset(phba); 9178 return PCI_ERS_RESULT_NEED_RESET; 9179 } 9180 } 9181 9182 /** 9183 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 9184 * @pdev: pointer to PCI device. 9185 * 9186 * This routine is called from the PCI subsystem for error handling to 9187 * device with SLI-3 interface spec. 
This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Mark the SLI layer inactive until brdrestart brings it back */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt, retrying the last active mode */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
9278 * 9279 * returns the number of ELS/CT IOCBs to reserve 9280 **/ 9281 int 9282 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 9283 { 9284 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 9285 9286 if (phba->sli_rev == LPFC_SLI_REV4) { 9287 if (max_xri <= 100) 9288 return 10; 9289 else if (max_xri <= 256) 9290 return 25; 9291 else if (max_xri <= 512) 9292 return 50; 9293 else if (max_xri <= 1024) 9294 return 100; 9295 else if (max_xri <= 1536) 9296 return 150; 9297 else if (max_xri <= 2048) 9298 return 200; 9299 else 9300 return 250; 9301 } else 9302 return 0; 9303 } 9304 9305 /** 9306 * lpfc_write_firmware - attempt to write a firmware image to the port 9307 * @phba: pointer to lpfc hba data structure. 9308 * @fw: pointer to firmware image returned from request_firmware. 9309 * 9310 * returns the number of bytes written if write is successful. 9311 * returns a negative error value if there were errors. 9312 * returns 0 if firmware matches currently active firmware on port. 9313 **/ 9314 int 9315 lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) 9316 { 9317 char fwrev[FW_REV_STR_SIZE]; 9318 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data; 9319 struct list_head dma_buffer_list; 9320 int i, rc = 0; 9321 struct lpfc_dmabuf *dmabuf, *next; 9322 uint32_t offset = 0, temp_offset = 0; 9323 9324 INIT_LIST_HEAD(&dma_buffer_list); 9325 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || 9326 (bf_get_be32(lpfc_grp_hdr_file_type, image) != 9327 LPFC_FILE_TYPE_GROUP) || 9328 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || 9329 (be32_to_cpu(image->size) != fw->size)) { 9330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9331 "3022 Invalid FW image found. 
" 9332 "Magic:%x Type:%x ID:%x\n", 9333 be32_to_cpu(image->magic_number), 9334 bf_get_be32(lpfc_grp_hdr_file_type, image), 9335 bf_get_be32(lpfc_grp_hdr_id, image)); 9336 return -EINVAL; 9337 } 9338 lpfc_decode_firmware_rev(phba, fwrev, 1); 9339 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 9340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9341 "3023 Updating Firmware. Current Version:%s " 9342 "New Version:%s\n", 9343 fwrev, image->revision); 9344 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 9345 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 9346 GFP_KERNEL); 9347 if (!dmabuf) { 9348 rc = -ENOMEM; 9349 goto out; 9350 } 9351 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 9352 SLI4_PAGE_SIZE, 9353 &dmabuf->phys, 9354 GFP_KERNEL); 9355 if (!dmabuf->virt) { 9356 kfree(dmabuf); 9357 rc = -ENOMEM; 9358 goto out; 9359 } 9360 list_add_tail(&dmabuf->list, &dma_buffer_list); 9361 } 9362 while (offset < fw->size) { 9363 temp_offset = offset; 9364 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 9365 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 9366 memcpy(dmabuf->virt, 9367 fw->data + temp_offset, 9368 fw->size - temp_offset); 9369 temp_offset = fw->size; 9370 break; 9371 } 9372 memcpy(dmabuf->virt, fw->data + temp_offset, 9373 SLI4_PAGE_SIZE); 9374 temp_offset += SLI4_PAGE_SIZE; 9375 } 9376 rc = lpfc_wr_object(phba, &dma_buffer_list, 9377 (fw->size - offset), &offset); 9378 if (rc) { 9379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9380 "3024 Firmware update failed. 
" 9381 "%d\n", rc); 9382 goto out; 9383 } 9384 } 9385 rc = offset; 9386 } 9387 out: 9388 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 9389 list_del(&dmabuf->list); 9390 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 9391 dmabuf->virt, dmabuf->phys); 9392 kfree(dmabuf); 9393 } 9394 return rc; 9395 } 9396 9397 /** 9398 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 9399 * @pdev: pointer to PCI device 9400 * @pid: pointer to PCI device identifier 9401 * 9402 * This routine is called from the kernel's PCI subsystem to device with 9403 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 9404 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 9405 * information of the device and driver to see if the driver state that it 9406 * can support this kind of device. If the match is successful, the driver 9407 * core invokes this routine. If this routine determines it can claim the HBA, 9408 * it does all the initialization that it needs to do to handle the HBA 9409 * properly. 
9410 * 9411 * Return code 9412 * 0 - driver can claim the device 9413 * negative value - driver can not claim the device 9414 **/ 9415 static int __devinit 9416 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 9417 { 9418 struct lpfc_hba *phba; 9419 struct lpfc_vport *vport = NULL; 9420 struct Scsi_Host *shost = NULL; 9421 int error; 9422 uint32_t cfg_mode, intr_mode; 9423 int mcnt; 9424 int adjusted_fcp_io_channel; 9425 const struct firmware *fw; 9426 uint8_t file_name[16]; 9427 9428 /* Allocate memory for HBA structure */ 9429 phba = lpfc_hba_alloc(pdev); 9430 if (!phba) 9431 return -ENOMEM; 9432 9433 /* Perform generic PCI device enabling operation */ 9434 error = lpfc_enable_pci_dev(phba); 9435 if (error) 9436 goto out_free_phba; 9437 9438 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 9439 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 9440 if (error) 9441 goto out_disable_pci_dev; 9442 9443 /* Set up SLI-4 specific device PCI memory space */ 9444 error = lpfc_sli4_pci_mem_setup(phba); 9445 if (error) { 9446 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9447 "1410 Failed to set up pci memory space.\n"); 9448 goto out_disable_pci_dev; 9449 } 9450 9451 /* Set up phase-1 common device driver resources */ 9452 error = lpfc_setup_driver_resource_phase1(phba); 9453 if (error) { 9454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9455 "1411 Failed to set up driver resource.\n"); 9456 goto out_unset_pci_mem_s4; 9457 } 9458 9459 /* Set up SLI-4 Specific device driver resources */ 9460 error = lpfc_sli4_driver_resource_setup(phba); 9461 if (error) { 9462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9463 "1412 Failed to set up driver resource.\n"); 9464 goto out_unset_pci_mem_s4; 9465 } 9466 9467 /* Initialize and populate the iocb list per host */ 9468 9469 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9470 "2821 initialize iocb list %d.\n", 9471 phba->cfg_iocb_cnt*1024); 9472 error = lpfc_init_iocb_list(phba, 
phba->cfg_iocb_cnt*1024); 9473 9474 if (error) { 9475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9476 "1413 Failed to initialize iocb list.\n"); 9477 goto out_unset_driver_resource_s4; 9478 } 9479 9480 INIT_LIST_HEAD(&phba->active_rrq_list); 9481 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 9482 9483 /* Set up common device driver resources */ 9484 error = lpfc_setup_driver_resource_phase2(phba); 9485 if (error) { 9486 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9487 "1414 Failed to set up driver resource.\n"); 9488 goto out_free_iocb_list; 9489 } 9490 9491 /* Get the default values for Model Name and Description */ 9492 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9493 9494 /* Create SCSI host to the physical port */ 9495 error = lpfc_create_shost(phba); 9496 if (error) { 9497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9498 "1415 Failed to create scsi host.\n"); 9499 goto out_unset_driver_resource; 9500 } 9501 9502 /* Configure sysfs attributes */ 9503 vport = phba->pport; 9504 error = lpfc_alloc_sysfs_attr(vport); 9505 if (error) { 9506 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9507 "1416 Failed to allocate sysfs attr\n"); 9508 goto out_destroy_shost; 9509 } 9510 9511 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 9512 /* Now, trying to enable interrupt and bring up the device */ 9513 cfg_mode = phba->cfg_use_msi; 9514 while (true) { 9515 /* Put device to a known state before enabling interrupt */ 9516 lpfc_stop_port(phba); 9517 /* Configure and enable interrupt */ 9518 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 9519 if (intr_mode == LPFC_INTR_ERROR) { 9520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9521 "0426 Failed to enable interrupt.\n"); 9522 error = -ENODEV; 9523 goto out_free_sysfs_attr; 9524 } 9525 /* Default to single EQ for non-MSI-X */ 9526 if (phba->intr_type != MSIX) 9527 adjusted_fcp_io_channel = 1; 9528 else if (phba->sli4_hba.msix_vec_nr < 9529 phba->cfg_fcp_io_channel) 9530 adjusted_fcp_io_channel = 
phba->sli4_hba.msix_vec_nr; 9531 else 9532 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; 9533 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; 9534 /* Set up SLI-4 HBA */ 9535 if (lpfc_sli4_hba_setup(phba)) { 9536 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9537 "1421 Failed to set up hba\n"); 9538 error = -ENODEV; 9539 goto out_disable_intr; 9540 } 9541 9542 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 9543 if (intr_mode != 0) 9544 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 9545 LPFC_ACT_INTR_CNT); 9546 9547 /* Check active interrupts received only for MSI/MSI-X */ 9548 if (intr_mode == 0 || 9549 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 9550 /* Log the current active interrupt mode */ 9551 phba->intr_mode = intr_mode; 9552 lpfc_log_intr_mode(phba, intr_mode); 9553 break; 9554 } 9555 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9556 "0451 Configure interrupt mode (%d) " 9557 "failed active interrupt test.\n", 9558 intr_mode); 9559 /* Unset the previous SLI-4 HBA setup. */ 9560 /* 9561 * TODO: Is this operation compatible with IF TYPE 2 9562 * devices? All port state is deleted and cleared. 9563 */ 9564 lpfc_sli4_unset_hba(phba); 9565 /* Try next level of interrupt mode */ 9566 cfg_mode = --intr_mode; 9567 } 9568 9569 /* Perform post initialization setup */ 9570 lpfc_post_init_setup(phba); 9571 9572 /* check for firmware upgrade or downgrade (if_type 2 only) */ 9573 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9574 LPFC_SLI_INTF_IF_TYPE_2) { 9575 snprintf(file_name, 16, "%s.grp", phba->ModelName); 9576 error = request_firmware(&fw, file_name, &phba->pcidev->dev); 9577 if (!error) { 9578 lpfc_write_firmware(phba, fw); 9579 release_firmware(fw); 9580 } 9581 } 9582 9583 /* Check if there are static vports to be created. 
*/ 9584 lpfc_create_static_vport(phba); 9585 return 0; 9586 9587 out_disable_intr: 9588 lpfc_sli4_disable_intr(phba); 9589 out_free_sysfs_attr: 9590 lpfc_free_sysfs_attr(vport); 9591 out_destroy_shost: 9592 lpfc_destroy_shost(phba); 9593 out_unset_driver_resource: 9594 lpfc_unset_driver_resource_phase2(phba); 9595 out_free_iocb_list: 9596 lpfc_free_iocb_list(phba); 9597 out_unset_driver_resource_s4: 9598 lpfc_sli4_driver_resource_unset(phba); 9599 out_unset_pci_mem_s4: 9600 lpfc_sli4_pci_mem_unset(phba); 9601 out_disable_pci_dev: 9602 lpfc_disable_pci_dev(phba); 9603 if (shost) 9604 scsi_host_put(shost); 9605 out_free_phba: 9606 lpfc_hba_free(phba); 9607 return error; 9608 } 9609 9610 /** 9611 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 9612 * @pdev: pointer to PCI device 9613 * 9614 * This routine is called from the kernel's PCI subsystem to device with 9615 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 9616 * removed from PCI bus, it performs all the necessary cleanup for the HBA 9617 * device to be removed from the PCI subsystem properly. 
 **/
static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* The physical port itself is torn down below */
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	/* Unlink this port from the HBA's vport list under the lock */
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);

	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and bring
 * the device offline. Note that as the driver implements the minimum PM
 * requirements to a power-aware driver's PM support for suspend/resume -- all
 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 * method call will be treated as SUSPEND and the driver will fully
 * reinitialize its device during resume() method call, the driver will set
 * device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
9701 * 9702 * Return code 9703 * 0 - driver suspended the device 9704 * Error otherwise 9705 **/ 9706 static int 9707 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 9708 { 9709 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9710 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9711 9712 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9713 "2843 PCI device Power Management suspend.\n"); 9714 9715 /* Bring down the device */ 9716 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 9717 lpfc_offline(phba); 9718 kthread_stop(phba->worker_thread); 9719 9720 /* Disable interrupt from device */ 9721 lpfc_sli4_disable_intr(phba); 9722 lpfc_sli4_queue_destroy(phba); 9723 9724 /* Save device state to PCI config space */ 9725 pci_save_state(pdev); 9726 pci_set_power_state(pdev, PCI_D3hot); 9727 9728 return 0; 9729 } 9730 9731 /** 9732 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 9733 * @pdev: pointer to PCI device 9734 * 9735 * This routine is called from the kernel's PCI subsystem to support system 9736 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes 9737 * this method, it restores the device's PCI config space state and fully 9738 * reinitializes the device and brings it online. Note that as the driver 9739 * implements the minimum PM requirements to a power-aware driver's PM for 9740 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 9741 * to the suspend() method call will be treated as SUSPEND and the driver 9742 * will fully reinitialize its device during resume() method call, the device 9743 * will be set to PCI_D0 directly in PCI config space before restoring the 9744 * state. 
9745 * 9746 * Return code 9747 * 0 - driver suspended the device 9748 * Error otherwise 9749 **/ 9750 static int 9751 lpfc_pci_resume_one_s4(struct pci_dev *pdev) 9752 { 9753 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9754 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9755 uint32_t intr_mode; 9756 int error; 9757 9758 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9759 "0292 PCI device Power Management resume.\n"); 9760 9761 /* Restore device state from PCI config space */ 9762 pci_set_power_state(pdev, PCI_D0); 9763 pci_restore_state(pdev); 9764 9765 /* 9766 * As the new kernel behavior of pci_restore_state() API call clears 9767 * device saved_state flag, need to save the restored state again. 9768 */ 9769 pci_save_state(pdev); 9770 9771 if (pdev->is_busmaster) 9772 pci_set_master(pdev); 9773 9774 /* Startup the kernel thread for this host adapter. */ 9775 phba->worker_thread = kthread_run(lpfc_do_work, phba, 9776 "lpfc_worker_%d", phba->brd_no); 9777 if (IS_ERR(phba->worker_thread)) { 9778 error = PTR_ERR(phba->worker_thread); 9779 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9780 "0293 PM resume failed to start worker " 9781 "thread: error=x%x.\n", error); 9782 return error; 9783 } 9784 9785 /* Configure and enable interrupt */ 9786 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 9787 if (intr_mode == LPFC_INTR_ERROR) { 9788 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9789 "0294 PM resume Failed to enable interrupt\n"); 9790 return -EIO; 9791 } else 9792 phba->intr_mode = intr_mode; 9793 9794 /* Restart HBA and bring it online */ 9795 lpfc_sli_brdrestart(phba); 9796 lpfc_online(phba); 9797 9798 /* Log the current active interrupt mode */ 9799 lpfc_log_intr_mode(phba, phba->intr_mode); 9800 9801 return 0; 9802 } 9803 9804 /** 9805 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 9806 * @phba: pointer to lpfc hba data structure. 
9807 * 9808 * This routine is called to prepare the SLI4 device for PCI slot recover. It 9809 * aborts all the outstanding SCSI I/Os to the pci device. 9810 **/ 9811 static void 9812 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 9813 { 9814 struct lpfc_sli *psli = &phba->sli; 9815 struct lpfc_sli_ring *pring; 9816 9817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9818 "2828 PCI channel I/O abort preparing for recovery\n"); 9819 /* 9820 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 9821 * and let the SCSI mid-layer to retry them to recover. 9822 */ 9823 pring = &psli->ring[psli->fcp_ring]; 9824 lpfc_sli_abort_iocb_ring(phba, pring); 9825 } 9826 9827 /** 9828 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 9829 * @phba: pointer to lpfc hba data structure. 9830 * 9831 * This routine is called to prepare the SLI4 device for PCI slot reset. It 9832 * disables the device interrupt and pci device, and aborts the internal FCP 9833 * pending I/Os. 9834 **/ 9835 static void 9836 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 9837 { 9838 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9839 "2826 PCI channel disable preparing for reset\n"); 9840 9841 /* Block any management I/Os to the device */ 9842 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 9843 9844 /* Block all SCSI devices' I/Os on the host */ 9845 lpfc_scsi_dev_block(phba); 9846 9847 /* stop all timers */ 9848 lpfc_stop_hba_timers(phba); 9849 9850 /* Disable interrupt and pci device */ 9851 lpfc_sli4_disable_intr(phba); 9852 lpfc_sli4_queue_destroy(phba); 9853 pci_disable_device(phba->pcidev); 9854 9855 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 9856 lpfc_sli_flush_fcp_rings(phba); 9857 } 9858 9859 /** 9860 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 9861 * @phba: pointer to lpfc hba data structure. 9862 * 9863 * This routine is called to prepare the SLI4 device for PCI slot permanently 9864 * disabling. 
It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
9894 * 9895 * Return codes 9896 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9897 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9898 **/ 9899 static pci_ers_result_t 9900 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 9901 { 9902 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9903 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9904 9905 switch (state) { 9906 case pci_channel_io_normal: 9907 /* Non-fatal error, prepare for recovery */ 9908 lpfc_sli4_prep_dev_for_recover(phba); 9909 return PCI_ERS_RESULT_CAN_RECOVER; 9910 case pci_channel_io_frozen: 9911 /* Fatal error, prepare for slot reset */ 9912 lpfc_sli4_prep_dev_for_reset(phba); 9913 return PCI_ERS_RESULT_NEED_RESET; 9914 case pci_channel_io_perm_failure: 9915 /* Permanent failure, prepare for device down */ 9916 lpfc_sli4_prep_dev_for_perm_failure(phba); 9917 return PCI_ERS_RESULT_DISCONNECT; 9918 default: 9919 /* Unknown state, prepare and request slot reset */ 9920 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9921 "2825 Unknown PCI error state: x%x\n", state); 9922 lpfc_sli4_prep_dev_for_reset(phba); 9923 return PCI_ERS_RESULT_NEED_RESET; 9924 } 9925 } 9926 9927 /** 9928 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 9929 * @pdev: pointer to PCI device. 9930 * 9931 * This routine is called from the PCI subsystem for error handling to device 9932 * with SLI-4 interface spec. It is called after PCI bus has been reset to 9933 * restart the PCI card from scratch, as if from a cold-boot. During the 9934 * PCI subsystem error recovery, after the driver returns 9935 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 9936 * recovery and then call this routine before calling the .resume method to 9937 * recover the device. This function will initialize the HBA device, enable 9938 * the interrupt, but it will just put the HBA to offline state without 9939 * passing any I/O traffic. 
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/*
	 * Clear the SLI-active flag under the hba lock; lpfc_io_resume_s4()
	 * tests this flag to decide whether the deferred device reset and
	 * online must be performed.
	 */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec.
It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 * LPFC_SLI_ACTIVE was cleared by lpfc_io_slot_reset_s4() to request
	 * this deferred reset.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver state that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
10040 * 10041 * Return code 10042 * 0 - driver can claim the device 10043 * negative value - driver can not claim the device 10044 **/ 10045 static int __devinit 10046 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 10047 { 10048 int rc; 10049 struct lpfc_sli_intf intf; 10050 10051 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 10052 return -ENODEV; 10053 10054 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 10055 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 10056 rc = lpfc_pci_probe_one_s4(pdev, pid); 10057 else 10058 rc = lpfc_pci_probe_one_s3(pdev, pid); 10059 10060 return rc; 10061 } 10062 10063 /** 10064 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 10065 * @pdev: pointer to PCI device 10066 * 10067 * This routine is to be registered to the kernel's PCI subsystem. When an 10068 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 10069 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 10070 * remove routine, which will perform all the necessary cleanup for the 10071 * device to be removed from the PCI subsystem properly. 
10072 **/ 10073 static void __devexit 10074 lpfc_pci_remove_one(struct pci_dev *pdev) 10075 { 10076 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10077 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10078 10079 switch (phba->pci_dev_grp) { 10080 case LPFC_PCI_DEV_LP: 10081 lpfc_pci_remove_one_s3(pdev); 10082 break; 10083 case LPFC_PCI_DEV_OC: 10084 lpfc_pci_remove_one_s4(pdev); 10085 break; 10086 default: 10087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10088 "1424 Invalid PCI device group: 0x%x\n", 10089 phba->pci_dev_grp); 10090 break; 10091 } 10092 return; 10093 } 10094 10095 /** 10096 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 10097 * @pdev: pointer to PCI device 10098 * @msg: power management message 10099 * 10100 * This routine is to be registered to the kernel's PCI subsystem to support 10101 * system Power Management (PM). When PM invokes this method, it dispatches 10102 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 10103 * suspend the device. 
10104 * 10105 * Return code 10106 * 0 - driver suspended the device 10107 * Error otherwise 10108 **/ 10109 static int 10110 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 10111 { 10112 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10113 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10114 int rc = -ENODEV; 10115 10116 switch (phba->pci_dev_grp) { 10117 case LPFC_PCI_DEV_LP: 10118 rc = lpfc_pci_suspend_one_s3(pdev, msg); 10119 break; 10120 case LPFC_PCI_DEV_OC: 10121 rc = lpfc_pci_suspend_one_s4(pdev, msg); 10122 break; 10123 default: 10124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10125 "1425 Invalid PCI device group: 0x%x\n", 10126 phba->pci_dev_grp); 10127 break; 10128 } 10129 return rc; 10130 } 10131 10132 /** 10133 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 10134 * @pdev: pointer to PCI device 10135 * 10136 * This routine is to be registered to the kernel's PCI subsystem to support 10137 * system Power Management (PM). When PM invokes this method, it dispatches 10138 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 10139 * resume the device. 
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	/* Dispatch to the SLI-3 or SLI-4 specific resume routine */
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
10178 * 10179 * Return codes 10180 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 10181 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10182 **/ 10183 static pci_ers_result_t 10184 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 10185 { 10186 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10187 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10188 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 10189 10190 switch (phba->pci_dev_grp) { 10191 case LPFC_PCI_DEV_LP: 10192 rc = lpfc_io_error_detected_s3(pdev, state); 10193 break; 10194 case LPFC_PCI_DEV_OC: 10195 rc = lpfc_io_error_detected_s4(pdev, state); 10196 break; 10197 default: 10198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10199 "1427 Invalid PCI device group: 0x%x\n", 10200 phba->pci_dev_grp); 10201 break; 10202 } 10203 return rc; 10204 } 10205 10206 /** 10207 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 10208 * @pdev: pointer to PCI device. 10209 * 10210 * This routine is registered to the PCI subsystem for error handling. This 10211 * function is called after PCI bus has been reset to restart the PCI card 10212 * from scratch, as if from a cold-boot. When this routine is invoked, it 10213 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 10214 * routine, which will perform the proper device reset. 
10215 * 10216 * Return codes 10217 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 10218 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10219 **/ 10220 static pci_ers_result_t 10221 lpfc_io_slot_reset(struct pci_dev *pdev) 10222 { 10223 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10224 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10225 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 10226 10227 switch (phba->pci_dev_grp) { 10228 case LPFC_PCI_DEV_LP: 10229 rc = lpfc_io_slot_reset_s3(pdev); 10230 break; 10231 case LPFC_PCI_DEV_OC: 10232 rc = lpfc_io_slot_reset_s4(pdev); 10233 break; 10234 default: 10235 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10236 "1428 Invalid PCI device group: 0x%x\n", 10237 phba->pci_dev_grp); 10238 break; 10239 } 10240 return rc; 10241 } 10242 10243 /** 10244 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 10245 * @pdev: pointer to PCI device 10246 * 10247 * This routine is registered to the PCI subsystem for error handling. It 10248 * is called when kernel error recovery tells the lpfc driver that it is 10249 * OK to resume normal PCI operation after PCI bus error recovery. When 10250 * this routine is invoked, it dispatches the action to the proper SLI-3 10251 * or SLI-4 device io_resume routine, which will resume the device operation. 
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Dispatch to the SLI-3 or SLI-4 specific io_resume routine */
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
 * @inode: pointer to the inode representing the lpfcmgmt device
 * @filep: pointer to the file representing the open lpfcmgmt device
 *
 * This routine puts a reference count on the lpfc module whenever the
 * character device is opened
 **/
static int
lpfc_mgmt_open(struct inode *inode, struct file *filep)
{
	/*
	 * NOTE(review): the try_module_get() return value is ignored, so the
	 * open succeeds even if the module is on its way out — confirm this
	 * is intended.
	 */
	try_module_get(THIS_MODULE);
	return 0;
}

/**
 * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
 * @inode: pointer to the inode representing the lpfcmgmt device
 * @filep: pointer to the file representing the open lpfcmgmt device
 *
 * This routine removes a reference count from the lpfc module when the
 * character device is closed
 **/
static int
lpfc_mgmt_release(struct inode *inode, struct file *filep)
{
	module_put(THIS_MODULE);
	return 0;
}

/* PCI vendor/device ID pairs of all HBA models claimed by this driver */
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

/* PCI error-recovery callbacks; each entry dispatches by SLI revision */
static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = { 10412 .name = LPFC_DRIVER_NAME, 10413 .id_table = lpfc_id_table, 10414 .probe = lpfc_pci_probe_one, 10415 .remove = __devexit_p(lpfc_pci_remove_one), 10416 .suspend = lpfc_pci_suspend_one, 10417 .resume = lpfc_pci_resume_one, 10418 .err_handler = &lpfc_err_handler, 10419 }; 10420 10421 static const struct file_operations lpfc_mgmt_fop = { 10422 .open = lpfc_mgmt_open, 10423 .release = lpfc_mgmt_release, 10424 }; 10425 10426 static struct miscdevice lpfc_mgmt_dev = { 10427 .minor = MISC_DYNAMIC_MINOR, 10428 .name = "lpfcmgmt", 10429 .fops = &lpfc_mgmt_fop, 10430 }; 10431 10432 /** 10433 * lpfc_init - lpfc module initialization routine 10434 * 10435 * This routine is to be invoked when the lpfc module is loaded into the 10436 * kernel. The special kernel macro module_init() is used to indicate the 10437 * role of this routine to the kernel as lpfc module entry point. 10438 * 10439 * Return codes 10440 * 0 - successful 10441 * -ENOMEM - FC attach transport failed 10442 * all others - failed 10443 */ 10444 static int __init 10445 lpfc_init(void) 10446 { 10447 int error = 0; 10448 10449 printk(LPFC_MODULE_DESC "\n"); 10450 printk(LPFC_COPYRIGHT "\n"); 10451 10452 error = misc_register(&lpfc_mgmt_dev); 10453 if (error) 10454 printk(KERN_ERR "Could not register lpfcmgmt device, " 10455 "misc_register returned with status %d", error); 10456 10457 if (lpfc_enable_npiv) { 10458 lpfc_transport_functions.vport_create = lpfc_vport_create; 10459 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 10460 } 10461 lpfc_transport_template = 10462 fc_attach_transport(&lpfc_transport_functions); 10463 if (lpfc_transport_template == NULL) 10464 return -ENOMEM; 10465 if (lpfc_enable_npiv) { 10466 lpfc_vport_transport_template = 10467 fc_attach_transport(&lpfc_vport_transport_functions); 10468 if (lpfc_vport_transport_template == NULL) { 10469 fc_release_transport(lpfc_transport_template); 10470 return -ENOMEM; 10471 } 10472 } 10473 error = 
pci_register_driver(&lpfc_driver); 10474 if (error) { 10475 fc_release_transport(lpfc_transport_template); 10476 if (lpfc_enable_npiv) 10477 fc_release_transport(lpfc_vport_transport_template); 10478 } 10479 10480 return error; 10481 } 10482 10483 /** 10484 * lpfc_exit - lpfc module removal routine 10485 * 10486 * This routine is invoked when the lpfc module is removed from the kernel. 10487 * The special kernel macro module_exit() is used to indicate the role of 10488 * this routine to the kernel as lpfc module exit point. 10489 */ 10490 static void __exit 10491 lpfc_exit(void) 10492 { 10493 misc_deregister(&lpfc_mgmt_dev); 10494 pci_unregister_driver(&lpfc_driver); 10495 fc_release_transport(lpfc_transport_template); 10496 if (lpfc_enable_npiv) 10497 fc_release_transport(lpfc_vport_transport_template); 10498 if (_dump_buf_data) { 10499 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 10500 "_dump_buf_data at 0x%p\n", 10501 (1L << _dump_buf_data_order), _dump_buf_data); 10502 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 10503 } 10504 10505 if (_dump_buf_dif) { 10506 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 10507 "_dump_buf_dif at 0x%p\n", 10508 (1L << _dump_buf_dif_order), _dump_buf_dif); 10509 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 10510 } 10511 } 10512 10513 module_init(lpfc_init); 10514 module_exit(lpfc_exit); 10515 MODULE_LICENSE("GPL"); 10516 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 10517 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 10518 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 10519