1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Emulex. All rights reserved. 24 * Use is subject to License terms. 25 */ 26 27 #define DEF_ICFG 1 28 29 #include <emlxs.h> 30 #include <emlxs_version.h> 31 32 char emlxs_revision[] = EMLXS_REVISION; 33 char emlxs_version[] = EMLXS_VERSION; 34 char emlxs_name[] = EMLXS_NAME; 35 char emlxs_label[] = EMLXS_LABEL; 36 37 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */ 38 EMLXS_MSG_DEF(EMLXS_SOLARIS_C); 39 40 #ifdef MENLO_SUPPORT 41 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp); 42 #endif /* MENLO_SUPPORT */ 43 44 static void emlxs_fca_attach(emlxs_hba_t *hba); 45 static void emlxs_fca_detach(emlxs_hba_t *hba); 46 static void emlxs_drv_banner(emlxs_hba_t *hba); 47 48 static int32_t emlxs_get_props(emlxs_hba_t *hba); 49 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp); 50 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp); 51 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp); 52 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp); 53 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t 
*sbp); 54 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 55 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp); 56 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 57 static uint32_t emlxs_add_instance(int32_t ddiinst); 58 static void emlxs_iodone(emlxs_buf_t *sbp); 59 static int emlxs_pm_lower_power(dev_info_t *dip); 60 static int emlxs_pm_raise_power(dev_info_t *dip); 61 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, 62 uint32_t failed); 63 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3); 64 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba); 65 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code, 66 uint32_t args, uint32_t *arg); 67 68 #ifdef SLI3_SUPPORT 69 static void emlxs_read_vport_prop(emlxs_hba_t *hba); 70 #endif /* SLI3_SUPPORT */ 71 72 73 /* 74 * Driver Entry Routines. 75 */ 76 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t); 77 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t); 78 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *); 79 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *); 80 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t, 81 cred_t *, int32_t *); 82 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **); 83 84 85 /* 86 * FC_AL Transport Functions. 
87 */ 88 static opaque_t emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *, 89 fc_fca_bind_info_t *); 90 static void emlxs_unbind_port(opaque_t); 91 static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *); 92 static int32_t emlxs_get_cap(opaque_t, char *, void *); 93 static int32_t emlxs_set_cap(opaque_t, char *, void *); 94 static int32_t emlxs_get_map(opaque_t, fc_lilpmap_t *); 95 static int32_t emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t, 96 uint32_t *, uint32_t); 97 static int32_t emlxs_ub_free(opaque_t, uint32_t, uint64_t *); 98 99 static opaque_t emlxs_get_device(opaque_t, fc_portid_t); 100 static int32_t emlxs_notify(opaque_t, uint32_t); 101 static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *); 102 103 /* 104 * Driver Internal Functions. 105 */ 106 107 static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *); 108 static int32_t emlxs_power(dev_info_t *, int32_t, int32_t); 109 #ifdef EMLXS_I386 110 #ifdef S11 111 static int32_t emlxs_quiesce(dev_info_t *); 112 #endif 113 #endif 114 static int32_t emlxs_hba_resume(dev_info_t *); 115 static int32_t emlxs_hba_suspend(dev_info_t *); 116 static int32_t emlxs_hba_detach(dev_info_t *); 117 static int32_t emlxs_hba_attach(dev_info_t *); 118 static void emlxs_lock_destroy(emlxs_hba_t *); 119 static void emlxs_lock_init(emlxs_hba_t *); 120 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *, 121 uint32_t, uint8_t); 122 123 char *emlxs_pm_components[] = { 124 "NAME=emlxx000", 125 "0=Device D3 State", 126 "1=Device D0 State" 127 }; 128 129 130 /* 131 * Default emlx dma limits 132 */ 133 ddi_dma_lim_t emlxs_dma_lim = { 134 (uint32_t)0, /* dlim_addr_lo */ 135 (uint32_t)0xffffffff, /* dlim_addr_hi */ 136 (uint_t)0x00ffffff, /* dlim_cntr_max */ 137 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dlim_burstsizes */ 138 1, /* dlim_minxfer */ 139 0x00ffffff /* dlim_dmaspeed */ 140 }; 141 142 /* 143 * Be careful when using these attributes; the defaults listed below are 144 * (almost) the most general 
case, permitting allocation in almost any 145 * way supported by the LightPulse family. The sole exception is the 146 * alignment specified as requiring memory allocation on a 4-byte boundary; 147 * the Lightpulse can DMA memory on any byte boundary. 148 * 149 * The LightPulse family currently is limited to 16M transfers; 150 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields. 151 */ 152 ddi_dma_attr_t emlxs_dma_attr = { 153 DMA_ATTR_V0, /* dma_attr_version */ 154 (uint64_t)0, /* dma_attr_addr_lo */ 155 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 156 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 157 1, /* dma_attr_align */ 158 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 159 1, /* dma_attr_minxfer */ 160 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 161 (uint64_t)0xffffffff, /* dma_attr_seg */ 162 EMLXS_SGLLEN, /* dma_attr_sgllen */ 163 1, /* dma_attr_granular */ 164 0 /* dma_attr_flags */ 165 }; 166 167 ddi_dma_attr_t emlxs_dma_attr_ro = { 168 DMA_ATTR_V0, /* dma_attr_version */ 169 (uint64_t)0, /* dma_attr_addr_lo */ 170 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 171 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 172 1, /* dma_attr_align */ 173 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 174 1, /* dma_attr_minxfer */ 175 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 176 (uint64_t)0xffffffff, /* dma_attr_seg */ 177 EMLXS_SGLLEN, /* dma_attr_sgllen */ 178 1, /* dma_attr_granular */ 179 DDI_DMA_RELAXED_ORDERING /* dma_attr_flags */ 180 }; 181 182 ddi_dma_attr_t emlxs_dma_attr_1sg = { 183 DMA_ATTR_V0, /* dma_attr_version */ 184 (uint64_t)0, /* dma_attr_addr_lo */ 185 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 186 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 187 1, /* dma_attr_align */ 188 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 189 1, /* dma_attr_minxfer */ 190 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 191 (uint64_t)0xffffffff, /* dma_attr_seg 
*/ 192 1, /* dma_attr_sgllen */ 193 1, /* dma_attr_granular */ 194 0 /* dma_attr_flags */ 195 }; 196 197 #if (EMLXS_MODREV >= EMLXS_MODREV3) 198 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = { 199 DMA_ATTR_V0, /* dma_attr_version */ 200 (uint64_t)0, /* dma_attr_addr_lo */ 201 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 202 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 203 1, /* dma_attr_align */ 204 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 205 1, /* dma_attr_minxfer */ 206 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 207 (uint64_t)0xffffffff, /* dma_attr_seg */ 208 EMLXS_SGLLEN, /* dma_attr_sgllen */ 209 1, /* dma_attr_granular */ 210 0 /* dma_attr_flags */ 211 }; 212 #endif /* >= EMLXS_MODREV3 */ 213 214 /* 215 * DDI access attributes for device 216 */ 217 ddi_device_acc_attr_t emlxs_dev_acc_attr = { 218 (uint16_t)DDI_DEVICE_ATTR_V0, /* devacc_attr_version */ 219 (uint8_t)DDI_STRUCTURE_LE_ACC, /* PCI is Little Endian */ 220 (uint8_t)DDI_STRICTORDER_ACC /* devacc_attr_dataorder */ 221 }; 222 223 /* 224 * DDI access attributes for data 225 */ 226 ddi_device_acc_attr_t emlxs_data_acc_attr = { 227 DDI_DEVICE_ATTR_V0, /* devacc_attr_version */ 228 DDI_NEVERSWAP_ACC, /* don't swap for Data */ 229 DDI_STRICTORDER_ACC /* devacc_attr_dataorder */ 230 }; 231 232 /* 233 * Fill in the FC Transport structure, 234 * as defined in the Fibre Channel Transport Programmming Guide. 
235 */ 236 #if (EMLXS_MODREV == EMLXS_MODREV5) 237 static fc_fca_tran_t emlxs_fca_tran = { 238 FCTL_FCA_MODREV_5, /* fca_version, with SUN NPIV support */ 239 MAX_VPORTS, /* fca numerb of ports */ 240 sizeof (emlxs_buf_t), /* fca pkt size */ 241 2048, /* fca cmd max */ 242 &emlxs_dma_lim, /* fca dma limits */ 243 0, /* fca iblock, to be filled in later */ 244 &emlxs_dma_attr, /* fca dma attributes */ 245 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */ 246 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */ 247 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */ 248 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */ 249 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */ 250 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */ 251 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */ 252 &emlxs_data_acc_attr, /* fca access atributes */ 253 0, /* fca_num_npivports */ 254 {0, 0, 0, 0, 0, 0, 0, 0}, /* Physical port WWPN */ 255 emlxs_bind_port, 256 emlxs_unbind_port, 257 emlxs_pkt_init, 258 emlxs_pkt_uninit, 259 emlxs_transport, 260 emlxs_get_cap, 261 emlxs_set_cap, 262 emlxs_get_map, 263 emlxs_transport, 264 emlxs_ub_alloc, 265 emlxs_ub_free, 266 emlxs_ub_release, 267 emlxs_pkt_abort, 268 emlxs_reset, 269 emlxs_port_manage, 270 emlxs_get_device, 271 emlxs_notify 272 }; 273 #endif /* EMLXS_MODREV5 */ 274 275 276 #if (EMLXS_MODREV == EMLXS_MODREV4) 277 static fc_fca_tran_t emlxs_fca_tran = { 278 FCTL_FCA_MODREV_4, /* fca_version */ 279 MAX_VPORTS, /* fca numerb of ports */ 280 sizeof (emlxs_buf_t), /* fca pkt size */ 281 2048, /* fca cmd max */ 282 &emlxs_dma_lim, /* fca dma limits */ 283 0, /* fca iblock, to be filled in later */ 284 &emlxs_dma_attr, /* fca dma attributes */ 285 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */ 286 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */ 287 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */ 288 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */ 289 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip 
rsp attributes */ 290 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */ 291 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */ 292 &emlxs_data_acc_attr, /* fca access atributes */ 293 emlxs_bind_port, 294 emlxs_unbind_port, 295 emlxs_pkt_init, 296 emlxs_pkt_uninit, 297 emlxs_transport, 298 emlxs_get_cap, 299 emlxs_set_cap, 300 emlxs_get_map, 301 emlxs_transport, 302 emlxs_ub_alloc, 303 emlxs_ub_free, 304 emlxs_ub_release, 305 emlxs_pkt_abort, 306 emlxs_reset, 307 emlxs_port_manage, 308 emlxs_get_device, 309 emlxs_notify 310 }; 311 #endif /* EMLXS_MODEREV4 */ 312 313 314 #if (EMLXS_MODREV == EMLXS_MODREV3) 315 static fc_fca_tran_t emlxs_fca_tran = { 316 FCTL_FCA_MODREV_3, /* fca_version */ 317 MAX_VPORTS, /* fca numerb of ports */ 318 sizeof (emlxs_buf_t), /* fca pkt size */ 319 2048, /* fca cmd max */ 320 &emlxs_dma_lim, /* fca dma limits */ 321 0, /* fca iblock, to be filled in later */ 322 &emlxs_dma_attr, /* fca dma attributes */ 323 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */ 324 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */ 325 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */ 326 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */ 327 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */ 328 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */ 329 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */ 330 &emlxs_data_acc_attr, /* fca access atributes */ 331 emlxs_bind_port, 332 emlxs_unbind_port, 333 emlxs_pkt_init, 334 emlxs_pkt_uninit, 335 emlxs_transport, 336 emlxs_get_cap, 337 emlxs_set_cap, 338 emlxs_get_map, 339 emlxs_transport, 340 emlxs_ub_alloc, 341 emlxs_ub_free, 342 emlxs_ub_release, 343 emlxs_pkt_abort, 344 emlxs_reset, 345 emlxs_port_manage, 346 emlxs_get_device, 347 emlxs_notify 348 }; 349 #endif /* EMLXS_MODREV3 */ 350 351 352 #if (EMLXS_MODREV == EMLXS_MODREV2) 353 static fc_fca_tran_t emlxs_fca_tran = { 354 FCTL_FCA_MODREV_2, /* fca_version */ 355 MAX_VPORTS, /* number of ports */ 356 sizeof (emlxs_buf_t), /* 
pkt size */ 357 2048, /* max cmds */ 358 &emlxs_dma_lim, /* DMA limits */ 359 0, /* iblock, to be filled in later */ 360 &emlxs_dma_attr, /* dma attributes */ 361 &emlxs_data_acc_attr, /* access atributes */ 362 emlxs_bind_port, 363 emlxs_unbind_port, 364 emlxs_pkt_init, 365 emlxs_pkt_uninit, 366 emlxs_transport, 367 emlxs_get_cap, 368 emlxs_set_cap, 369 emlxs_get_map, 370 emlxs_transport, 371 emlxs_ub_alloc, 372 emlxs_ub_free, 373 emlxs_ub_release, 374 emlxs_pkt_abort, 375 emlxs_reset, 376 emlxs_port_manage, 377 emlxs_get_device, 378 emlxs_notify 379 }; 380 #endif /* EMLXS_MODREV2 */ 381 382 /* 383 * This is needed when the module gets loaded by the kernel 384 * so ddi library calls get resolved. 385 */ 386 #ifndef MODSYM_SUPPORT 387 char _depends_on[] = "misc/fctl"; 388 #endif /* MODSYM_SUPPORT */ 389 390 /* 391 * state pointer which the implementation uses as a place to 392 * hang a set of per-driver structures; 393 * 394 */ 395 void *emlxs_soft_state = NULL; 396 397 /* 398 * Driver Global variables. 399 */ 400 int32_t emlxs_scsi_reset_delay = 3000; /* milliseconds */ 401 402 emlxs_device_t emlxs_device; 403 404 uint32_t emlxs_instance[MAX_FC_BRDS]; /* uses emlxs_device.lock */ 405 uint32_t emlxs_instance_count = 0; /* uses emlxs_device.lock */ 406 407 408 /* 409 * Single private "global" lock used to gain access to 410 * the hba_list and/or any other case where we want need to be 411 * single-threaded. 412 */ 413 uint32_t emlxs_diag_state; 414 415 /* 416 * CB ops vector. Used for administration only. 
417 */ 418 static struct cb_ops emlxs_cb_ops = { 419 emlxs_open, /* cb_open */ 420 emlxs_close, /* cb_close */ 421 nodev, /* cb_strategy */ 422 nodev, /* cb_print */ 423 nodev, /* cb_dump */ 424 nodev, /* cb_read */ 425 nodev, /* cb_write */ 426 emlxs_ioctl, /* cb_ioctl */ 427 nodev, /* cb_devmap */ 428 nodev, /* cb_mmap */ 429 nodev, /* cb_segmap */ 430 nochpoll, /* cb_chpoll */ 431 ddi_prop_op, /* cb_prop_op */ 432 0, /* cb_stream */ 433 #ifdef _LP64 434 D_64BIT | D_HOTPLUG | D_MP | D_NEW, /* cb_flag */ 435 #else 436 D_HOTPLUG | D_MP | D_NEW, /* cb_flag */ 437 #endif 438 CB_REV, /* rev */ 439 nodev, /* cb_aread */ 440 nodev /* cb_awrite */ 441 }; 442 443 /* Generic bus ops */ 444 static struct bus_ops emlxs_bus_ops = { 445 BUSO_REV, 446 nullbusmap, /* bus_map */ 447 NULL, /* bus_get_intrspec */ 448 NULL, /* bus_add_intrspec */ 449 NULL, /* bus_remove_intrspec */ 450 i_ddi_map_fault, /* bus_map_fault */ 451 ddi_dma_map, /* bus_dma_map */ 452 ddi_dma_allochdl, /* bus_dma_allochdl */ 453 ddi_dma_freehdl, /* bus_dma_freehdl */ 454 ddi_dma_bindhdl, /* bus_dma_bindhdl */ 455 ddi_dma_unbindhdl, /* bus_unbindhdl */ 456 ddi_dma_flush, /* bus_dma_flush */ 457 ddi_dma_win, /* bus_dma_win */ 458 ddi_dma_mctl, /* bus_dma_ctl */ 459 ddi_ctlops, /* bus_ctl */ 460 ddi_bus_prop_op, /* bus_prop_op */ 461 }; 462 463 static struct dev_ops emlxs_ops = { 464 DEVO_REV, /* rev */ 465 0, /* refcnt */ 466 emlxs_info, /* getinfo */ 467 nulldev, /* identify */ 468 nulldev, /* probe */ 469 emlxs_attach, /* attach */ 470 emlxs_detach, /* detach */ 471 nodev, /* reset */ 472 &emlxs_cb_ops, /* devo_cb_ops */ 473 &emlxs_bus_ops, /* bus ops - Gets replaced by */ 474 /* fctl_fca_busops in fc_fca_init */ 475 emlxs_power, /* power ops */ 476 #ifdef EMLXS_I386 477 #ifdef S11 478 emlxs_quiesce, /* quiesce */ 479 #endif 480 #endif 481 }; 482 483 #include <sys/modctl.h> 484 extern struct mod_ops mod_driverops; 485 486 #ifdef SAN_DIAG_SUPPORT 487 extern kmutex_t sd_bucket_mutex; 488 extern 
sd_bucket_info_t sd_bucket; 489 #endif /* SAN_DIAG_SUPPORT */ 490 491 /* 492 * Module linkage information for the kernel. 493 */ 494 static struct modldrv emlxs_modldrv = { 495 &mod_driverops, /* module type - driver */ 496 emlxs_name, /* module name */ 497 &emlxs_ops, /* driver ops */ 498 }; 499 500 501 /* 502 * Driver module linkage structure 503 */ 504 static struct modlinkage emlxs_modlinkage = { 505 MODREV_1, /* ml_rev - must be MODREV_1 */ 506 &emlxs_modldrv, /* ml_linkage */ 507 NULL /* end of driver linkage */ 508 }; 509 510 511 /* We only need to add entries for non-default return codes. */ 512 /* Entries do not need to be in order. */ 513 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */ 514 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE */ 515 516 emlxs_xlat_err_t emlxs_iostat_tbl[] = { 517 /* {f/w code, pkt_state, pkt_reason, */ 518 /* pkt_expln, pkt_action} */ 519 520 /* 0x00 - Do not remove */ 521 {IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE, 522 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 523 524 /* 0x01 - Do not remove */ 525 {IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE, 526 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 527 528 /* 0x02 */ 529 {IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS, 530 FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE}, 531 532 /* 533 * This is a default entry. 
534 * The real codes are written dynamically in emlxs_els.c 535 */ 536 /* 0x09 */ 537 {IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE, 538 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 539 540 /* Special error code */ 541 /* 0x10 */ 542 {IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN, 543 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 544 545 /* Special error code */ 546 /* 0x11 */ 547 {IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, 548 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 549 550 /* CLASS 2 only */ 551 /* 0x04 */ 552 {IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR, 553 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 554 555 /* CLASS 2 only */ 556 /* 0x05 */ 557 {IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR, 558 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 559 560 /* CLASS 2 only */ 561 /* 0x06 */ 562 {IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY, 563 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY}, 564 565 /* CLASS 2 only */ 566 /* 0x07 */ 567 {IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY, 568 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY}, 569 }; 570 571 #define IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t)) 572 573 574 /* We only need to add entries for non-default return codes. */ 575 /* Entries do not need to be in order. 
*/ 576 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */ 577 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */ 578 579 emlxs_xlat_err_t emlxs_ioerr_tbl[] = { 580 /* {f/w code, pkt_state, pkt_reason, */ 581 /* pkt_expln, pkt_action} */ 582 583 /* 0x01 */ 584 {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN, 585 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 586 587 /* 0x02 */ 588 {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 589 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 590 591 /* 0x04 */ 592 {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 593 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 594 595 /* 0x05 */ 596 {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 597 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 598 599 /* 0x06 */ 600 {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 601 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 602 603 /* 0x07 */ 604 {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 605 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 606 607 /* 0x08 */ 608 {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 609 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 610 611 /* 0x0B */ 612 {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 613 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 614 615 /* 0x0D */ 616 {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 617 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 618 619 /* 0x0E */ 620 {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 621 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 622 623 /* 0x0F */ 624 {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME, 625 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 626 627 /* 0x11 */ 628 {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 629 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 630 631 /* 0x13 */ 632 {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH, 633 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 634 635 /* 0x14 */ 636 {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED, 637 FC_EXPLN_NONE, 
FC_ACTION_RETRYABLE}, 638 639 /* 0x15 */ 640 {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 641 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 642 643 /* 0x16 */ 644 {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 645 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 646 647 /* 0x17 */ 648 {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT, 649 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 650 651 /* 0x18 */ 652 {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL, 653 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 654 655 /* 0x1A */ 656 {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 657 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 658 659 /* 0x21 */ 660 {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID, 661 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 662 663 /* Occurs at link down */ 664 /* 0x28 */ 665 {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 666 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 667 668 /* 0xF0 */ 669 {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 670 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 671 }; 672 673 #define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t)) 674 675 676 677 emlxs_table_t emlxs_error_table[] = { 678 {IOERR_SUCCESS, "No error."}, 679 {IOERR_MISSING_CONTINUE, "Missing continue."}, 680 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."}, 681 {IOERR_INTERNAL_ERROR, "Internal error."}, 682 {IOERR_INVALID_RPI, "Invalid RPI."}, 683 {IOERR_NO_XRI, "No XRI."}, 684 {IOERR_ILLEGAL_COMMAND, "Illegal command."}, 685 {IOERR_XCHG_DROPPED, "Exchange dropped."}, 686 {IOERR_ILLEGAL_FIELD, "Illegal field."}, 687 {IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."}, 688 {IOERR_TX_DMA_FAILED, "TX DMA failed."}, 689 {IOERR_RX_DMA_FAILED, "RX DMA failed."}, 690 {IOERR_ILLEGAL_FRAME, "Illegal frame."}, 691 {IOERR_NO_RESOURCES, "No resources."}, 692 {IOERR_ILLEGAL_LENGTH, "Illegal length."}, 693 {IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."}, 694 {IOERR_ABORT_IN_PROGRESS, "Abort in progess."}, 
695 {IOERR_ABORT_REQUESTED, "Abort requested."}, 696 {IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."}, 697 {IOERR_LOOP_OPEN_FAILURE, "Loop open failed."}, 698 {IOERR_RING_RESET, "Ring reset."}, 699 {IOERR_LINK_DOWN, "Link down."}, 700 {IOERR_CORRUPTED_DATA, "Corrupted data."}, 701 {IOERR_CORRUPTED_RPI, "Corrupted RPI."}, 702 {IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."}, 703 {IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."}, 704 {IOERR_DUP_FRAME, "Duplicate frame."}, 705 {IOERR_LINK_CONTROL_FRAME, "Link control frame."}, 706 {IOERR_BAD_HOST_ADDRESS, "Bad host address."}, 707 {IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."}, 708 {IOERR_MISSING_HDR_BUFFER, "Missing header buffer."}, 709 {IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."}, 710 {IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."}, 711 {IOERR_BUFFER_SHORTAGE, "Buffer shortage."}, 712 {IOERR_XRIBUF_WAITING, "XRI buffer shortage"}, 713 {IOERR_XRIBUF_MISSING, "XRI buffer missing"}, 714 {IOERR_ROFFSET_INVAL, "Relative offset invalid."}, 715 {IOERR_ROFFSET_MISSING, "Relative offset missing."}, 716 {IOERR_INSUF_BUFFER, "Buffer too small."}, 717 {IOERR_MISSING_SI, "ELS frame missing SI"}, 718 {IOERR_MISSING_ES, "Exhausted burst without ES"}, 719 {IOERR_INCOMP_XFER, "Transfer incomplete."}, 720 {IOERR_ABORT_TIMEOUT, "Abort timeout."} 721 722 }; /* emlxs_error_table */ 723 724 725 emlxs_table_t emlxs_state_table[] = { 726 {IOSTAT_SUCCESS, "Success."}, 727 {IOSTAT_FCP_RSP_ERROR, "FCP response error."}, 728 {IOSTAT_REMOTE_STOP, "Remote stop."}, 729 {IOSTAT_LOCAL_REJECT, "Local reject."}, 730 {IOSTAT_NPORT_RJT, "NPort reject."}, 731 {IOSTAT_FABRIC_RJT, "Fabric reject."}, 732 {IOSTAT_NPORT_BSY, "Nport busy."}, 733 {IOSTAT_FABRIC_BSY, "Fabric busy."}, 734 {IOSTAT_INTERMED_RSP, "Intermediate response."}, 735 {IOSTAT_LS_RJT, "LS reject."}, 736 {IOSTAT_CMD_REJECT, "Cmd reject."}, 737 {IOSTAT_FCP_TGT_LENCHK, "TGT length check."}, 738 {IOSTAT_NEED_BUF_ENTRY, "Need buffer entry."}, 739 
{IOSTAT_ILLEGAL_FRAME_RCVD, "Illegal frame."}, 740 {IOSTAT_DATA_UNDERRUN, "Data underrun."}, 741 {IOSTAT_DATA_OVERRUN, "Data overrun."}, 742 743 }; /* emlxs_state_table */ 744 745 746 #ifdef MENLO_SUPPORT 747 emlxs_table_t emlxs_menlo_cmd_table[] = { 748 {MENLO_CMD_INITIALIZE, "MENLO_INIT"}, 749 {MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"}, 750 {MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"}, 751 {MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"}, 752 {MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"}, 753 {MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"}, 754 755 {MENLO_CMD_GET_INIT, "MENLO_GET_INIT"}, 756 {MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"}, 757 {MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"}, 758 {MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"}, 759 {MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"}, 760 {MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"}, 761 {MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"}, 762 {MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"}, 763 {MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"}, 764 765 {MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"}, 766 {MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"}, 767 {MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"}, 768 769 {MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"}, 770 {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"}, 771 772 {MENLO_CMD_RESET, "MENLO_RESET"}, 773 {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"} 774 775 }; /* emlxs_menlo_cmd_table */ 776 777 emlxs_table_t emlxs_menlo_rsp_table[] = { 778 {MENLO_RSP_SUCCESS, "SUCCESS"}, 779 {MENLO_ERR_FAILED, "FAILED"}, 780 {MENLO_ERR_INVALID_CMD, "INVALID_CMD"}, 781 {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"}, 782 {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"}, 783 {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"}, 784 {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"}, 785 {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"}, 786 {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"}, 787 {MENLO_ERR_INVALID_DATA, "INVALID_DATA"}, 788 {MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"}, 789 {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"}, 790 
{MENLO_ERR_INVALID_MASK, "INVALID_MASK"}, 791 {MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"}, 792 {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"}, 793 {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"}, 794 {MENLO_ERR_BUSY, "BUSY"}, 795 796 }; /* emlxs_menlo_rsp_table */ 797 798 #endif /* MENLO_SUPPORT */ 799 800 801 emlxs_table_t emlxs_mscmd_table[] = { 802 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 803 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 804 {MS_GTIN, "MS_GTIN"}, 805 {MS_GIEL, "MS_GIEL"}, 806 {MS_GIET, "MS_GIET"}, 807 {MS_GDID, "MS_GDID"}, 808 {MS_GMID, "MS_GMID"}, 809 {MS_GFN, "MS_GFN"}, 810 {MS_GIELN, "MS_GIELN"}, 811 {MS_GMAL, "MS_GMAL"}, 812 {MS_GIEIL, "MS_GIEIL"}, 813 {MS_GPL, "MS_GPL"}, 814 {MS_GPT, "MS_GPT"}, 815 {MS_GPPN, "MS_GPPN"}, 816 {MS_GAPNL, "MS_GAPNL"}, 817 {MS_GPS, "MS_GPS"}, 818 {MS_GPSC, "MS_GPSC"}, 819 {MS_GATIN, "MS_GATIN"}, 820 {MS_GSES, "MS_GSES"}, 821 {MS_GPLNL, "MS_GPLNL"}, 822 {MS_GPLT, "MS_GPLT"}, 823 {MS_GPLML, "MS_GPLML"}, 824 {MS_GPAB, "MS_GPAB"}, 825 {MS_GNPL, "MS_GNPL"}, 826 {MS_GPNL, "MS_GPNL"}, 827 {MS_GPFCP, "MS_GPFCP"}, 828 {MS_GPLI, "MS_GPLI"}, 829 {MS_GNID, "MS_GNID"}, 830 {MS_RIELN, "MS_RIELN"}, 831 {MS_RPL, "MS_RPL"}, 832 {MS_RPLN, "MS_RPLN"}, 833 {MS_RPLT, "MS_RPLT"}, 834 {MS_RPLM, "MS_RPLM"}, 835 {MS_RPAB, "MS_RPAB"}, 836 {MS_RPFCP, "MS_RPFCP"}, 837 {MS_RPLI, "MS_RPLI"}, 838 {MS_DPL, "MS_DPL"}, 839 {MS_DPLN, "MS_DPLN"}, 840 {MS_DPLM, "MS_DPLM"}, 841 {MS_DPLML, "MS_DPLML"}, 842 {MS_DPLI, "MS_DPLI"}, 843 {MS_DPAB, "MS_DPAB"}, 844 {MS_DPALL, "MS_DPALL"} 845 846 }; /* emlxs_mscmd_table */ 847 848 849 emlxs_table_t emlxs_ctcmd_table[] = { 850 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 851 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 852 {SLI_CTNS_GA_NXT, "GA_NXT"}, 853 {SLI_CTNS_GPN_ID, "GPN_ID"}, 854 {SLI_CTNS_GNN_ID, "GNN_ID"}, 855 {SLI_CTNS_GCS_ID, "GCS_ID"}, 856 {SLI_CTNS_GFT_ID, "GFT_ID"}, 857 {SLI_CTNS_GSPN_ID, "GSPN_ID"}, 858 {SLI_CTNS_GPT_ID, "GPT_ID"}, 859 {SLI_CTNS_GID_PN, "GID_PN"}, 860 {SLI_CTNS_GID_NN, "GID_NN"}, 861 {SLI_CTNS_GIP_NN, "GIP_NN"}, 862 
{SLI_CTNS_GIPA_NN, "GIPA_NN"}, 863 {SLI_CTNS_GSNN_NN, "GSNN_NN"}, 864 {SLI_CTNS_GNN_IP, "GNN_IP"}, 865 {SLI_CTNS_GIPA_IP, "GIPA_IP"}, 866 {SLI_CTNS_GID_FT, "GID_FT"}, 867 {SLI_CTNS_GID_PT, "GID_PT"}, 868 {SLI_CTNS_RPN_ID, "RPN_ID"}, 869 {SLI_CTNS_RNN_ID, "RNN_ID"}, 870 {SLI_CTNS_RCS_ID, "RCS_ID"}, 871 {SLI_CTNS_RFT_ID, "RFT_ID"}, 872 {SLI_CTNS_RSPN_ID, "RSPN_ID"}, 873 {SLI_CTNS_RPT_ID, "RPT_ID"}, 874 {SLI_CTNS_RIP_NN, "RIP_NN"}, 875 {SLI_CTNS_RIPA_NN, "RIPA_NN"}, 876 {SLI_CTNS_RSNN_NN, "RSNN_NN"}, 877 {SLI_CTNS_DA_ID, "DA_ID"}, 878 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 879 880 }; /* emlxs_ctcmd_table */ 881 882 883 884 emlxs_table_t emlxs_rmcmd_table[] = { 885 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 886 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 887 {CT_OP_GSAT, "RM_GSAT"}, 888 {CT_OP_GHAT, "RM_GHAT"}, 889 {CT_OP_GPAT, "RM_GPAT"}, 890 {CT_OP_GDAT, "RM_GDAT"}, 891 {CT_OP_GPST, "RM_GPST"}, 892 {CT_OP_GDP, "RM_GDP"}, 893 {CT_OP_GDPG, "RM_GDPG"}, 894 {CT_OP_GEPS, "RM_GEPS"}, 895 {CT_OP_GLAT, "RM_GLAT"}, 896 {CT_OP_SSAT, "RM_SSAT"}, 897 {CT_OP_SHAT, "RM_SHAT"}, 898 {CT_OP_SPAT, "RM_SPAT"}, 899 {CT_OP_SDAT, "RM_SDAT"}, 900 {CT_OP_SDP, "RM_SDP"}, 901 {CT_OP_SBBS, "RM_SBBS"}, 902 {CT_OP_RPST, "RM_RPST"}, 903 {CT_OP_VFW, "RM_VFW"}, 904 {CT_OP_DFW, "RM_DFW"}, 905 {CT_OP_RES, "RM_RES"}, 906 {CT_OP_RHD, "RM_RHD"}, 907 {CT_OP_UFW, "RM_UFW"}, 908 {CT_OP_RDP, "RM_RDP"}, 909 {CT_OP_GHDR, "RM_GHDR"}, 910 {CT_OP_CHD, "RM_CHD"}, 911 {CT_OP_SSR, "RM_SSR"}, 912 {CT_OP_RSAT, "RM_RSAT"}, 913 {CT_OP_WSAT, "RM_WSAT"}, 914 {CT_OP_RSAH, "RM_RSAH"}, 915 {CT_OP_WSAH, "RM_WSAH"}, 916 {CT_OP_RACT, "RM_RACT"}, 917 {CT_OP_WACT, "RM_WACT"}, 918 {CT_OP_RKT, "RM_RKT"}, 919 {CT_OP_WKT, "RM_WKT"}, 920 {CT_OP_SSC, "RM_SSC"}, 921 {CT_OP_QHBA, "RM_QHBA"}, 922 {CT_OP_GST, "RM_GST"}, 923 {CT_OP_GFTM, "RM_GFTM"}, 924 {CT_OP_SRL, "RM_SRL"}, 925 {CT_OP_SI, "RM_SI"}, 926 {CT_OP_SRC, "RM_SRC"}, 927 {CT_OP_GPB, "RM_GPB"}, 928 {CT_OP_SPB, "RM_SPB"}, 929 {CT_OP_RPB, "RM_RPB"}, 930 {CT_OP_RAPB, "RM_RAPB"}, 931 
{CT_OP_GBC, "RM_GBC"}, 932 {CT_OP_GBS, "RM_GBS"}, 933 {CT_OP_SBS, "RM_SBS"}, 934 {CT_OP_GANI, "RM_GANI"}, 935 {CT_OP_GRV, "RM_GRV"}, 936 {CT_OP_GAPBS, "RM_GAPBS"}, 937 {CT_OP_APBC, "RM_APBC"}, 938 {CT_OP_GDT, "RM_GDT"}, 939 {CT_OP_GDLMI, "RM_GDLMI"}, 940 {CT_OP_GANA, "RM_GANA"}, 941 {CT_OP_GDLV, "RM_GDLV"}, 942 {CT_OP_GWUP, "RM_GWUP"}, 943 {CT_OP_GLM, "RM_GLM"}, 944 {CT_OP_GABS, "RM_GABS"}, 945 {CT_OP_SABS, "RM_SABS"}, 946 {CT_OP_RPR, "RM_RPR"}, 947 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 948 949 }; /* emlxs_rmcmd_table */ 950 951 952 emlxs_table_t emlxs_elscmd_table[] = { 953 {ELS_CMD_ACC, "ACC"}, 954 {ELS_CMD_LS_RJT, "LS_RJT"}, 955 {ELS_CMD_PLOGI, "PLOGI"}, 956 {ELS_CMD_FLOGI, "FLOGI"}, 957 {ELS_CMD_LOGO, "LOGO"}, 958 {ELS_CMD_ABTX, "ABTX"}, 959 {ELS_CMD_RCS, "RCS"}, 960 {ELS_CMD_RES, "RES"}, 961 {ELS_CMD_RSS, "RSS"}, 962 {ELS_CMD_RSI, "RSI"}, 963 {ELS_CMD_ESTS, "ESTS"}, 964 {ELS_CMD_ESTC, "ESTC"}, 965 {ELS_CMD_ADVC, "ADVC"}, 966 {ELS_CMD_RTV, "RTV"}, 967 {ELS_CMD_RLS, "RLS"}, 968 {ELS_CMD_ECHO, "ECHO"}, 969 {ELS_CMD_TEST, "TEST"}, 970 {ELS_CMD_RRQ, "RRQ"}, 971 {ELS_CMD_PRLI, "PRLI"}, 972 {ELS_CMD_PRLO, "PRLO"}, 973 {ELS_CMD_SCN, "SCN"}, 974 {ELS_CMD_TPLS, "TPLS"}, 975 {ELS_CMD_GPRLO, "GPRLO"}, 976 {ELS_CMD_GAID, "GAID"}, 977 {ELS_CMD_FACT, "FACT"}, 978 {ELS_CMD_FDACT, "FDACT"}, 979 {ELS_CMD_NACT, "NACT"}, 980 {ELS_CMD_NDACT, "NDACT"}, 981 {ELS_CMD_QoSR, "QoSR"}, 982 {ELS_CMD_RVCS, "RVCS"}, 983 {ELS_CMD_PDISC, "PDISC"}, 984 {ELS_CMD_FDISC, "FDISC"}, 985 {ELS_CMD_ADISC, "ADISC"}, 986 {ELS_CMD_FARP, "FARP"}, 987 {ELS_CMD_FARPR, "FARPR"}, 988 {ELS_CMD_FAN, "FAN"}, 989 {ELS_CMD_RSCN, "RSCN"}, 990 {ELS_CMD_SCR, "SCR"}, 991 {ELS_CMD_LINIT, "LINIT"}, 992 {ELS_CMD_RNID, "RNID"}, 993 {ELS_CMD_AUTH, "AUTH"} 994 995 }; /* emlxs_elscmd_table */ 996 997 998 /* 999 * 1000 * Device Driver Entry Routines 1001 * 1002 */ 1003 1004 #ifdef MODSYM_SUPPORT 1005 static void emlxs_fca_modclose(); 1006 static int emlxs_fca_modopen(); 1007 emlxs_modsym_t emlxs_modsym; 1008 1009 
static int 1010 emlxs_fca_modopen() 1011 { 1012 int err; 1013 1014 if (emlxs_modsym.mod_fctl) { 1015 return (EEXIST); 1016 } 1017 1018 /* Leadville (fctl) */ 1019 err = 0; 1020 emlxs_modsym.mod_fctl = 1021 ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err); 1022 if (!emlxs_modsym.mod_fctl) { 1023 cmn_err(CE_WARN, 1024 "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d", 1025 DRIVER_NAME, err); 1026 1027 goto failed; 1028 } 1029 1030 err = 0; 1031 /* Check if the fctl fc_fca_attach is present */ 1032 emlxs_modsym.fc_fca_attach = 1033 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach", 1034 &err); 1035 if ((void *)emlxs_modsym.fc_fca_attach == NULL) { 1036 cmn_err(CE_WARN, 1037 "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME); 1038 goto failed; 1039 } 1040 1041 err = 0; 1042 /* Check if the fctl fc_fca_detach is present */ 1043 emlxs_modsym.fc_fca_detach = 1044 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach", 1045 &err); 1046 if ((void *)emlxs_modsym.fc_fca_detach == NULL) { 1047 cmn_err(CE_WARN, 1048 "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME); 1049 goto failed; 1050 } 1051 1052 err = 0; 1053 /* Check if the fctl fc_fca_init is present */ 1054 emlxs_modsym.fc_fca_init = 1055 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err); 1056 if ((void *)emlxs_modsym.fc_fca_init == NULL) { 1057 cmn_err(CE_WARN, 1058 "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME); 1059 goto failed; 1060 } 1061 1062 return (0); 1063 1064 failed: 1065 1066 emlxs_fca_modclose(); 1067 1068 return (ENODEV); 1069 1070 1071 } /* emlxs_fca_modopen() */ 1072 1073 1074 static void 1075 emlxs_fca_modclose() 1076 { 1077 if (emlxs_modsym.mod_fctl) { 1078 (void) ddi_modclose(emlxs_modsym.mod_fctl); 1079 emlxs_modsym.mod_fctl = 0; 1080 } 1081 1082 emlxs_modsym.fc_fca_attach = NULL; 1083 emlxs_modsym.fc_fca_detach = NULL; 1084 emlxs_modsym.fc_fca_init = NULL; 1085 1086 return; 1087 1088 } /* emlxs_fca_modclose() */ 1089 1090 #endif /* 
MODSYM_SUPPORT */ 1091 1092 1093 1094 /* 1095 * Global driver initialization, called once when driver is loaded 1096 */ 1097 int 1098 _init(void) 1099 { 1100 int ret; 1101 char buf[64]; 1102 1103 /* 1104 * First init call for this driver, 1105 * so initialize the emlxs_dev_ctl structure. 1106 */ 1107 bzero(&emlxs_device, sizeof (emlxs_device)); 1108 1109 #ifdef MODSYM_SUPPORT 1110 bzero(&emlxs_modsym, sizeof (emlxs_modsym_t)); 1111 #endif /* MODSYM_SUPPORT */ 1112 1113 (void) sprintf(buf, "%s_device mutex", DRIVER_NAME); 1114 mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL); 1115 1116 (void) drv_getparm(LBOLT, &emlxs_device.log_timestamp); 1117 emlxs_device.drv_timestamp = ddi_get_time(); 1118 1119 for (ret = 0; ret < MAX_FC_BRDS; ret++) { 1120 emlxs_instance[ret] = (uint32_t)-1; 1121 } 1122 1123 /* 1124 * Provide for one ddiinst of the emlxs_dev_ctl structure 1125 * for each possible board in the system. 1126 */ 1127 if ((ret = ddi_soft_state_init(&emlxs_soft_state, 1128 sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) { 1129 cmn_err(CE_WARN, 1130 "?%s: _init: ddi_soft_state_init failed. rval=%x", 1131 DRIVER_NAME, ret); 1132 1133 return (ret); 1134 } 1135 1136 if ((ret = mod_install(&emlxs_modlinkage)) != 0) { 1137 (void) ddi_soft_state_fini(&emlxs_soft_state); 1138 } 1139 1140 #ifdef SAN_DIAG_SUPPORT 1141 (void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME); 1142 mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL); 1143 #endif /* SAN_DIAG_SUPPORT */ 1144 1145 return (ret); 1146 1147 } /* _init() */ 1148 1149 1150 /* 1151 * Called when driver is unloaded. 
1152 */ 1153 int 1154 _fini(void) 1155 { 1156 int ret; 1157 1158 if ((ret = mod_remove(&emlxs_modlinkage)) != 0) { 1159 return (ret); 1160 } 1161 #ifdef MODSYM_SUPPORT 1162 /* Close SFS */ 1163 emlxs_fca_modclose(); 1164 #ifdef SFCT_SUPPORT 1165 /* Close FCT */ 1166 emlxs_fct_modclose(); 1167 #endif /* SFCT_SUPPORT */ 1168 #endif /* MODSYM_SUPPORT */ 1169 1170 /* 1171 * Destroy the soft state structure 1172 */ 1173 (void) ddi_soft_state_fini(&emlxs_soft_state); 1174 1175 /* Destroy the global device lock */ 1176 mutex_destroy(&emlxs_device.lock); 1177 1178 #ifdef SAN_DIAG_SUPPORT 1179 mutex_destroy(&sd_bucket_mutex); 1180 #endif /* SAN_DIAG_SUPPORT */ 1181 1182 return (ret); 1183 1184 } /* _fini() */ 1185 1186 1187 1188 int 1189 _info(struct modinfo *modinfop) 1190 { 1191 1192 return (mod_info(&emlxs_modlinkage, modinfop)); 1193 1194 } /* _info() */ 1195 1196 1197 /* 1198 * Attach an ddiinst of an emlx host adapter. 1199 * Allocate data structures, initialize the adapter and we're ready to fly. 
1200 */ 1201 static int 1202 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 1203 { 1204 emlxs_hba_t *hba; 1205 int ddiinst; 1206 int emlxinst; 1207 int rval; 1208 1209 switch (cmd) { 1210 case DDI_ATTACH: 1211 /* If successful this will set EMLXS_PM_IN_ATTACH */ 1212 rval = emlxs_hba_attach(dip); 1213 break; 1214 1215 case DDI_PM_RESUME: 1216 /* This will resume the driver */ 1217 rval = emlxs_pm_raise_power(dip); 1218 break; 1219 1220 case DDI_RESUME: 1221 /* This will resume the driver */ 1222 rval = emlxs_hba_resume(dip); 1223 break; 1224 1225 default: 1226 rval = DDI_FAILURE; 1227 } 1228 1229 if (rval == DDI_SUCCESS) { 1230 ddiinst = ddi_get_instance(dip); 1231 emlxinst = emlxs_get_instance(ddiinst); 1232 hba = emlxs_device.hba[emlxinst]; 1233 1234 if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) { 1235 1236 /* Enable driver dump feature */ 1237 mutex_enter(&EMLXS_PORT_LOCK); 1238 hba->flag |= FC_DUMP_SAFE; 1239 mutex_exit(&EMLXS_PORT_LOCK); 1240 } 1241 } 1242 1243 return (rval); 1244 1245 } /* emlxs_attach() */ 1246 1247 1248 /* 1249 * Detach/prepare driver to unload (see detach(9E)). 
 */
static int
emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;
	int emlxinst;
	int rval;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];

	if (hba == NULL) {
		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);

		return (DDI_FAILURE);
	}

	/* (emlxs_hba_t *)-1 marks an instance whose attach failed */
	if (hba == (emlxs_hba_t *)-1) {
		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
		    DRIVER_NAME);

		return (DDI_FAILURE);
	}

	port = &PPORT;
	rval = DDI_SUCCESS;

	/* Check driver dump */
	mutex_enter(&EMLXS_PORT_LOCK);

	/* A dump in progress makes the device busy; refuse to detach */
	if (hba->flag & FC_DUMP_ACTIVE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "emlxs_detach: Driver busy. Driver dump active.");

		return (DDI_FAILURE);
	}

	/* Block new dumps while we are detaching/suspending */
	hba->flag &= ~FC_DUMP_SAFE;
	mutex_exit(&EMLXS_PORT_LOCK);

	switch (cmd) {
	case DDI_DETACH:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_DETACH");

		rval = emlxs_hba_detach(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to detach.");
		}
		break;


	case DDI_PM_SUSPEND:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_PM_SUSPEND");

		/* This will suspend the driver */
		rval = emlxs_pm_lower_power(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to lower power.");
		}

		break;


	case DDI_SUSPEND:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_SUSPEND");

		/* Suspend the driver */
		rval = emlxs_hba_suspend(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to suspend driver.");
		}
		break;


	default:
		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
		    DRIVER_NAME, cmd);
		rval = DDI_FAILURE;
	}

	if (rval == DDI_FAILURE) {
		/* Re-Enable driver dump feature */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag |= FC_DUMP_SAFE;
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	return (rval);

} /* emlxs_detach() */


/* EMLXS_PORT_LOCK must be held when calling this */
extern void
emlxs_port_init(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;

	/* Initialize the base node */
	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
	port->node_base.nlp_Rpi = 0;
	port->node_base.nlp_DID = 0xffffff;
	port->node_base.nlp_list_next = NULL;
	port->node_base.nlp_list_prev = NULL;
	port->node_base.nlp_active = 1;
	port->node_base.nlp_base = 1;
	port->node_count = 0;

	/* Port not enabled yet: use placeholder all-FF world wide names */
	if (!(port->flag & EMLXS_PORT_ENABLE)) {
		uint8_t dummy_wwn[8] =
		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
		    sizeof (NAME_TYPE));
	}

	/* No per-port names configured: inherit the HBA's names */
	/* NOTE(review): strncpy with 256 presumably matches the snn/spn */
	/* buffer sizes -- confirm against emlxs_port_t definition */
	if (!(port->flag & EMLXS_PORT_CONFIG)) {
		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
	}

	/* Seed service parameters from the HBA, then patch in port WWNs */
	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
	    sizeof (SERV_PARM));
	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
	    sizeof (NAME_TYPE));
	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
	    sizeof (NAME_TYPE));

	return;

} /* emlxs_port_init() */



/*
 * emlxs_bind_port
 *
 * Arguments:
 *
 * dip: the dev_info pointer for the ddiinst
 * port_info: pointer to info handed back to the transport
 * bind_info: pointer to info
from the transport 1411 * 1412 * Return values: a port handle for this port, NULL for failure 1413 * 1414 */ 1415 static opaque_t 1416 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info, 1417 fc_fca_bind_info_t *bind_info) 1418 { 1419 emlxs_hba_t *hba; 1420 emlxs_port_t *port; 1421 emlxs_port_t *vport; 1422 int ddiinst; 1423 emlxs_vpd_t *vpd; 1424 emlxs_config_t *cfg; 1425 char *dptr; 1426 char buffer[16]; 1427 uint32_t length; 1428 uint32_t len; 1429 char topology[32]; 1430 char linkspeed[32]; 1431 1432 ddiinst = ddi_get_instance(dip); 1433 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 1434 port = &PPORT; 1435 1436 ddiinst = hba->ddiinst; 1437 vpd = &VPD; 1438 cfg = &CFG; 1439 1440 mutex_enter(&EMLXS_PORT_LOCK); 1441 1442 if (bind_info->port_num > 0) { 1443 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1444 if (!(hba->flag & FC_NPIV_ENABLED) || 1445 !(bind_info->port_npiv) || 1446 (bind_info->port_num > hba->vpi_max)) 1447 #elif (EMLXS_MODREV >= EMLXS_MODREV3) 1448 if (!(hba->flag & FC_NPIV_ENABLED) || 1449 (bind_info->port_num > hba->vpi_high)) 1450 #endif 1451 { 1452 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1453 "emlxs_port_bind: Port %d not supported.", 1454 bind_info->port_num); 1455 1456 mutex_exit(&EMLXS_PORT_LOCK); 1457 1458 port_info->pi_error = FC_OUTOFBOUNDS; 1459 return (NULL); 1460 } 1461 } 1462 1463 /* Get true port pointer */ 1464 port = &VPORT(bind_info->port_num); 1465 1466 if (port->tgt_mode) { 1467 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1468 "emlxs_port_bind: Port %d is in target mode.", 1469 bind_info->port_num); 1470 1471 mutex_exit(&EMLXS_PORT_LOCK); 1472 1473 port_info->pi_error = FC_OUTOFBOUNDS; 1474 return (NULL); 1475 } 1476 1477 if (!port->ini_mode) { 1478 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1479 "emlxs_port_bind: Port %d is not in initiator mode.", 1480 bind_info->port_num); 1481 1482 mutex_exit(&EMLXS_PORT_LOCK); 1483 1484 port_info->pi_error = FC_OUTOFBOUNDS; 1485 return (NULL); 1486 } 1487 1488 /* 
Make sure the port is not already bound to the transport */ 1489 if (port->flag & EMLXS_PORT_BOUND) { 1490 1491 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1492 "emlxs_port_bind: Port %d already bound. flag=%x", 1493 bind_info->port_num, port->flag); 1494 1495 mutex_exit(&EMLXS_PORT_LOCK); 1496 1497 port_info->pi_error = FC_ALREADY; 1498 return (NULL); 1499 } 1500 1501 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1502 "fca_bind_port: Port %d: port_info=%p bind_info=%p", 1503 bind_info->port_num, port_info, bind_info); 1504 1505 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1506 if (bind_info->port_npiv) { 1507 bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn, 1508 sizeof (NAME_TYPE)); 1509 bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn, 1510 sizeof (NAME_TYPE)); 1511 if (port->snn[0] == 0) { 1512 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 1513 256); 1514 } 1515 1516 if (port->spn[0] == 0) { 1517 (void) sprintf((caddr_t)port->spn, "%s VPort-%d", 1518 (caddr_t)hba->spn, port->vpi); 1519 } 1520 port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE); 1521 } 1522 #endif /* >= EMLXS_MODREV5 */ 1523 1524 /* 1525 * Restricted login should apply both physical and 1526 * virtual ports. 
1527 */ 1528 if (cfg[CFG_VPORT_RESTRICTED].current) { 1529 port->flag |= EMLXS_PORT_RESTRICTED; 1530 } 1531 1532 /* Perform generic port initialization */ 1533 emlxs_port_init(port); 1534 1535 /* Perform SFS specific initialization */ 1536 port->ulp_handle = bind_info->port_handle; 1537 port->ulp_statec_cb = bind_info->port_statec_cb; 1538 port->ulp_unsol_cb = bind_info->port_unsol_cb; 1539 port->ub_count = EMLXS_UB_TOKEN_OFFSET; 1540 port->ub_pool = NULL; 1541 1542 /* Update the port info structure */ 1543 1544 /* Set the topology and state */ 1545 if ((hba->state < FC_LINK_UP) || 1546 ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) || 1547 !(hba->flag & FC_NPIV_SUPPORTED)))) { 1548 port_info->pi_port_state = FC_STATE_OFFLINE; 1549 port_info->pi_topology = FC_TOP_UNKNOWN; 1550 } 1551 #ifdef MENLO_SUPPORT 1552 else if (hba->flag & FC_MENLO_MODE) { 1553 port_info->pi_port_state = FC_STATE_OFFLINE; 1554 port_info->pi_topology = FC_TOP_UNKNOWN; 1555 } 1556 #endif /* MENLO_SUPPORT */ 1557 else { 1558 /* Check for loop topology */ 1559 if (hba->topology == TOPOLOGY_LOOP) { 1560 port_info->pi_port_state = FC_STATE_LOOP; 1561 (void) strcpy(topology, ", loop"); 1562 1563 if (hba->flag & FC_FABRIC_ATTACHED) { 1564 port_info->pi_topology = FC_TOP_PUBLIC_LOOP; 1565 } else { 1566 port_info->pi_topology = FC_TOP_PRIVATE_LOOP; 1567 } 1568 } else { 1569 port_info->pi_topology = FC_TOP_FABRIC; 1570 port_info->pi_port_state = FC_STATE_ONLINE; 1571 (void) strcpy(topology, ", fabric"); 1572 } 1573 1574 /* Set the link speed */ 1575 switch (hba->linkspeed) { 1576 case 0: 1577 (void) strcpy(linkspeed, "Gb"); 1578 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1579 break; 1580 1581 case LA_1GHZ_LINK: 1582 (void) strcpy(linkspeed, "1Gb"); 1583 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1584 break; 1585 case LA_2GHZ_LINK: 1586 (void) strcpy(linkspeed, "2Gb"); 1587 port_info->pi_port_state |= FC_STATE_2GBIT_SPEED; 1588 break; 1589 case LA_4GHZ_LINK: 1590 (void) 
strcpy(linkspeed, "4Gb"); 1591 port_info->pi_port_state |= FC_STATE_4GBIT_SPEED; 1592 break; 1593 case LA_8GHZ_LINK: 1594 (void) strcpy(linkspeed, "8Gb"); 1595 port_info->pi_port_state |= FC_STATE_8GBIT_SPEED; 1596 break; 1597 case LA_10GHZ_LINK: 1598 (void) strcpy(linkspeed, "10Gb"); 1599 port_info->pi_port_state |= FC_STATE_10GBIT_SPEED; 1600 break; 1601 default: 1602 (void) sprintf(linkspeed, "unknown(0x%x)", 1603 hba->linkspeed); 1604 break; 1605 } 1606 1607 /* Adjusting port context for link up messages */ 1608 vport = port; 1609 port = &PPORT; 1610 if (vport->vpi == 0) { 1611 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s", 1612 linkspeed, topology); 1613 } else if (!(hba->flag & FC_NPIV_LINKUP)) { 1614 hba->flag |= FC_NPIV_LINKUP; 1615 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg, 1616 "%s%s", linkspeed, topology); 1617 } 1618 port = vport; 1619 1620 } 1621 1622 /* Save initial state */ 1623 port->ulp_statec = port_info->pi_port_state; 1624 1625 /* 1626 * The transport needs a copy of the common service parameters 1627 * for this port. The transport can get any updates through 1628 * the getcap entry point. 1629 */ 1630 bcopy((void *) &port->sparam, 1631 (void *) &port_info->pi_login_params.common_service, 1632 sizeof (SERV_PARM)); 1633 1634 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 1635 /* Swap the service parameters for ULP */ 1636 emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params. 1637 common_service); 1638 #endif /* EMLXS_MODREV2X */ 1639 1640 port_info->pi_login_params.common_service.btob_credit = 0xffff; 1641 1642 bcopy((void *) &port->wwnn, 1643 (void *) &port_info->pi_login_params.node_ww_name, 1644 sizeof (NAME_TYPE)); 1645 1646 bcopy((void *) &port->wwpn, 1647 (void *) &port_info->pi_login_params.nport_ww_name, 1648 sizeof (NAME_TYPE)); 1649 1650 /* 1651 * We need to turn off CLASS2 support. 1652 * Otherwise, FC transport will use CLASS2 as default class 1653 * and never try with CLASS3. 
1654 */ 1655 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1656 #if (EMLXS_MODREVX >= EMLXS_MODREV3X) 1657 if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) { 1658 port_info->pi_login_params.class_1.class_opt &= ~0x0080; 1659 } 1660 1661 if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) { 1662 port_info->pi_login_params.class_2.class_opt &= ~0x0080; 1663 } 1664 #else /* EMLXS_SPARC or EMLXS_MODREV2X */ 1665 if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) { 1666 port_info->pi_login_params.class_1.class_opt &= ~0x8000; 1667 } 1668 1669 if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) { 1670 port_info->pi_login_params.class_2.class_opt &= ~0x8000; 1671 } 1672 #endif /* >= EMLXS_MODREV3X */ 1673 #endif /* >= EMLXS_MODREV3 */ 1674 1675 1676 #if (EMLXS_MODREV <= EMLXS_MODREV2) 1677 if ((port_info->pi_login_params.class_1.data[0]) & 0x80) { 1678 port_info->pi_login_params.class_1.data[0] &= ~0x80; 1679 } 1680 1681 if ((port_info->pi_login_params.class_2.data[0]) & 0x80) { 1682 port_info->pi_login_params.class_2.data[0] &= ~0x80; 1683 } 1684 #endif /* <= EMLXS_MODREV2 */ 1685 1686 /* Additional parameters */ 1687 port_info->pi_s_id.port_id = port->did; 1688 port_info->pi_s_id.priv_lilp_posit = 0; 1689 port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current; 1690 1691 /* Initialize the RNID parameters */ 1692 bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params)); 1693 1694 (void) sprintf((char *)port_info->pi_rnid_params.params.global_id, 1695 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType, 1696 hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 1697 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 1698 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 1699 1700 port_info->pi_rnid_params.params.unit_type = RNID_HBA; 1701 port_info->pi_rnid_params.params.port_id = port->did; 1702 port_info->pi_rnid_params.params.ip_version = RNID_IPV4; 1703 1704 /* Initialize the port attributes */ 1705 
bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs)); 1706 1707 (void) strcpy(port_info->pi_attrs.manufacturer, "Emulex"); 1708 1709 port_info->pi_rnid_params.status = FC_SUCCESS; 1710 1711 (void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num); 1712 1713 (void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)", 1714 vpd->fw_version, vpd->fw_label); 1715 1716 #ifdef EMLXS_I386 1717 (void) sprintf(port_info->pi_attrs.option_rom_version, 1718 "Boot:%s", vpd->boot_version); 1719 #else /* EMLXS_SPARC */ 1720 (void) sprintf(port_info->pi_attrs.option_rom_version, 1721 "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version); 1722 #endif /* EMLXS_I386 */ 1723 1724 1725 (void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)", 1726 emlxs_version, emlxs_revision); 1727 1728 (void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME); 1729 1730 port_info->pi_attrs.vendor_specific_id = 1731 ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX); 1732 1733 port_info->pi_attrs.supported_cos = SWAP_DATA32(FC_NS_CLASS3); 1734 1735 port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE; 1736 1737 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1738 1739 port_info->pi_rnid_params.params.num_attached = 0; 1740 1741 /* 1742 * Copy the serial number string (right most 16 chars) into the right 1743 * justified local buffer 1744 */ 1745 bzero(buffer, sizeof (buffer)); 1746 length = strlen(vpd->serial_num); 1747 len = (length > 16) ? 
16 : length; 1748 bcopy(&vpd->serial_num[(length - len)], 1749 &buffer[(sizeof (buffer) - len)], len); 1750 1751 port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index; 1752 1753 #endif /* >= EMLXS_MODREV5 */ 1754 1755 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLX_MODREV == EMLXS_MODREV4)) 1756 1757 port_info->pi_rnid_params.params.num_attached = 0; 1758 1759 if (hba->flag & FC_NPIV_ENABLED) { 1760 uint8_t byte; 1761 uint8_t *wwpn; 1762 uint32_t i; 1763 uint32_t j; 1764 1765 /* Copy the WWPN as a string into the local buffer */ 1766 wwpn = (uint8_t *)&hba->wwpn; 1767 for (i = 0; i < 16; i++) { 1768 byte = *wwpn++; 1769 j = ((byte & 0xf0) >> 4); 1770 if (j <= 9) { 1771 buffer[i] = 1772 (char)((uint8_t)'0' + (uint8_t)j); 1773 } else { 1774 buffer[i] = 1775 (char)((uint8_t)'A' + (uint8_t)(j - 1776 10)); 1777 } 1778 1779 i++; 1780 j = (byte & 0xf); 1781 if (j <= 9) { 1782 buffer[i] = 1783 (char)((uint8_t)'0' + (uint8_t)j); 1784 } else { 1785 buffer[i] = 1786 (char)((uint8_t)'A' + (uint8_t)(j - 1787 10)); 1788 } 1789 } 1790 1791 port_info->pi_attrs.hba_fru_details.port_index = port->vpi; 1792 } else { 1793 /* Copy the serial number string (right most 16 chars) */ 1794 /* into the right justified local buffer */ 1795 bzero(buffer, sizeof (buffer)); 1796 length = strlen(vpd->serial_num); 1797 len = (length > 16) ? 
16 : length; 1798 bcopy(&vpd->serial_num[(length - len)], 1799 &buffer[(sizeof (buffer) - len)], len); 1800 1801 port_info->pi_attrs.hba_fru_details.port_index = 1802 vpd->port_index; 1803 } 1804 1805 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */ 1806 1807 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1808 1809 dptr = (char *)&port_info->pi_attrs.hba_fru_details.high; 1810 dptr[0] = buffer[0]; 1811 dptr[1] = buffer[1]; 1812 dptr[2] = buffer[2]; 1813 dptr[3] = buffer[3]; 1814 dptr[4] = buffer[4]; 1815 dptr[5] = buffer[5]; 1816 dptr[6] = buffer[6]; 1817 dptr[7] = buffer[7]; 1818 port_info->pi_attrs.hba_fru_details.high = 1819 SWAP_DATA64(port_info->pi_attrs.hba_fru_details.high); 1820 1821 dptr = (char *)&port_info->pi_attrs.hba_fru_details.low; 1822 dptr[0] = buffer[8]; 1823 dptr[1] = buffer[9]; 1824 dptr[2] = buffer[10]; 1825 dptr[3] = buffer[11]; 1826 dptr[4] = buffer[12]; 1827 dptr[5] = buffer[13]; 1828 dptr[6] = buffer[14]; 1829 dptr[7] = buffer[15]; 1830 port_info->pi_attrs.hba_fru_details.low = 1831 SWAP_DATA64(port_info->pi_attrs.hba_fru_details.low); 1832 1833 #endif /* >= EMLXS_MODREV3 */ 1834 1835 #if (EMLXS_MODREV >= EMLXS_MODREV4) 1836 (void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name, 1837 (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN); 1838 (void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name, 1839 (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN); 1840 #endif /* >= EMLXS_MODREV4 */ 1841 1842 (void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev); 1843 1844 /* Set the hba speed limit */ 1845 if (vpd->link_speed & LMT_10GB_CAPABLE) { 1846 port_info->pi_attrs.supported_speed |= 1847 FC_HBA_PORTSPEED_10GBIT; 1848 } 1849 if (vpd->link_speed & LMT_8GB_CAPABLE) { 1850 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT; 1851 } 1852 if (vpd->link_speed & LMT_4GB_CAPABLE) { 1853 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT; 1854 } 1855 if (vpd->link_speed & LMT_2GB_CAPABLE) { 1856 port_info->pi_attrs.supported_speed |= 
FC_HBA_PORTSPEED_2GBIT; 1857 } 1858 if (vpd->link_speed & LMT_1GB_CAPABLE) { 1859 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT; 1860 } 1861 1862 /* Set the hba model info */ 1863 (void) strcpy(port_info->pi_attrs.model, hba->model_info.model); 1864 (void) strcpy(port_info->pi_attrs.model_description, 1865 hba->model_info.model_desc); 1866 1867 1868 /* Log information */ 1869 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1870 "Bind info: port_num = %d", bind_info->port_num); 1871 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1872 "Bind info: port_handle = %p", bind_info->port_handle); 1873 1874 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1875 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1876 "Bind info: port_npiv = %d", bind_info->port_npiv); 1877 #endif /* >= EMLXS_MODREV5 */ 1878 1879 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1880 "Port info: pi_topology = %x", port_info->pi_topology); 1881 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1882 "Port info: pi_error = %x", port_info->pi_error); 1883 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1884 "Port info: pi_port_state = %x", port_info->pi_port_state); 1885 1886 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1887 "Port info: port_id = %x", port_info->pi_s_id.port_id); 1888 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1889 "Port info: priv_lilp_posit = %x", 1890 port_info->pi_s_id.priv_lilp_posit); 1891 1892 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1893 "Port info: hard_addr = %x", 1894 port_info->pi_hard_addr.hard_addr); 1895 1896 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1897 "Port info: rnid.status = %x", 1898 port_info->pi_rnid_params.status); 1899 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1900 "Port info: rnid.global_id = %16s", 1901 port_info->pi_rnid_params.params.global_id); 1902 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1903 "Port info: rnid.unit_type = %x", 1904 port_info->pi_rnid_params.params.unit_type); 1905 
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1906 "Port info: rnid.port_id = %x", 1907 port_info->pi_rnid_params.params.port_id); 1908 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1909 "Port info: rnid.num_attached = %x", 1910 port_info->pi_rnid_params.params.num_attached); 1911 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1912 "Port info: rnid.ip_version = %x", 1913 port_info->pi_rnid_params.params.ip_version); 1914 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1915 "Port info: rnid.udp_port = %x", 1916 port_info->pi_rnid_params.params.udp_port); 1917 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1918 "Port info: rnid.ip_addr = %16s", 1919 port_info->pi_rnid_params.params.ip_addr); 1920 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1921 "Port info: rnid.spec_id_resv = %x", 1922 port_info->pi_rnid_params.params.specific_id_resv); 1923 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1924 "Port info: rnid.topo_flags = %x", 1925 port_info->pi_rnid_params.params.topo_flags); 1926 1927 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1928 "Port info: manufacturer = %s", 1929 port_info->pi_attrs.manufacturer); 1930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1931 "Port info: serial_num = %s", 1932 port_info->pi_attrs.serial_number); 1933 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1934 "Port info: model = %s", port_info->pi_attrs.model); 1935 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1936 "Port info: model_description = %s", 1937 port_info->pi_attrs.model_description); 1938 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1939 "Port info: hardware_version = %s", 1940 port_info->pi_attrs.hardware_version); 1941 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1942 "Port info: driver_version = %s", 1943 port_info->pi_attrs.driver_version); 1944 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1945 "Port info: option_rom_version = %s", 1946 port_info->pi_attrs.option_rom_version); 1947 
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1948 "Port info: firmware_version = %s", 1949 port_info->pi_attrs.firmware_version); 1950 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1951 "Port info: driver_name = %s", 1952 port_info->pi_attrs.driver_name); 1953 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1954 "Port info: vendor_specific_id = %x", 1955 port_info->pi_attrs.vendor_specific_id); 1956 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1957 "Port info: supported_cos = %x", 1958 port_info->pi_attrs.supported_cos); 1959 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1960 "Port info: supported_speed = %x", 1961 port_info->pi_attrs.supported_speed); 1962 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1963 "Port info: max_frame_size = %x", 1964 port_info->pi_attrs.max_frame_size); 1965 1966 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1967 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1968 "Port info: fru_port_index = %x", 1969 port_info->pi_attrs.hba_fru_details.port_index); 1970 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1971 "Port info: fru_high = %llx", 1972 port_info->pi_attrs.hba_fru_details.high); 1973 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1974 "Port info: fru_low = %llx", 1975 port_info->pi_attrs.hba_fru_details.low); 1976 #endif /* >= EMLXS_MODREV3 */ 1977 1978 #if (EMLXS_MODREV >= EMLXS_MODREV4) 1979 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1980 "Port info: sym_node_name = %s", 1981 port_info->pi_attrs.sym_node_name); 1982 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1983 "Port info: sym_port_name = %s", 1984 port_info->pi_attrs.sym_port_name); 1985 #endif /* >= EMLXS_MODREV4 */ 1986 1987 /* Set the bound flag */ 1988 port->flag |= EMLXS_PORT_BOUND; 1989 hba->num_of_ports++; 1990 1991 mutex_exit(&EMLXS_PORT_LOCK); 1992 1993 return ((opaque_t)port); 1994 1995 } /* emlxs_bind_port() */ 1996 1997 1998 static void 1999 emlxs_unbind_port(opaque_t fca_port_handle) 2000 { 2001 emlxs_port_t *port = 
	    (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	uint32_t count;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_unbind_port: port=%p", port);

	/* Check ub buffer pools */
	if (port->ub_pool) {
		mutex_enter(&EMLXS_UB_LOCK);

		/* Wait up to 10 seconds for all ub pools to be freed */
		count = 10 * 2;
		while (port->ub_pool && count) {
			/* Drop the lock while sleeping so the pools can */
			/* actually be released by other threads */
			mutex_exit(&EMLXS_UB_LOCK);
			delay(drv_usectohz(500000));	/* half second wait */
			count--;
			mutex_enter(&EMLXS_UB_LOCK);
		}

		if (port->ub_pool) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_unbind_port: Unsolicited buffers still "
			    "active. port=%p. Destroying...", port);

			/* Destroy all pools */
			while (port->ub_pool) {
				emlxs_ub_destroy(port, port->ub_pool);
			}
		}

		mutex_exit(&EMLXS_UB_LOCK);
	}

	/* Destroy & flush all port nodes, if they exist */
	if (port->node_count) {
		(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
	}
#if (EMLXS_MODREV >= EMLXS_MODREV5)
	if ((hba->flag & FC_NPIV_ENABLED) &&
	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
		(void) emlxs_mb_unreg_vpi(port);
	}
#endif

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Nothing to do if the port was never bound */
	if (!(port->flag & EMLXS_PORT_BOUND)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	port->flag &= ~EMLXS_PORT_BOUND;
	hba->num_of_ports--;

	/* Clear the ULP linkage established by emlxs_bind_port() */
	port->ulp_handle = 0;
	port->ulp_statec = FC_STATE_OFFLINE;
	port->ulp_statec_cb = NULL;
	port->ulp_unsol_cb = NULL;

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_unbind_port() */


/*
 * Initialize the FCA-private area of a newly allocated fc_packet.
 * Returns FC_SUCCESS, or FC_FAILURE if no private area is present.
 */
/*ARGSUSED*/
extern int
emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;

	if
(!sbp) { 2077 return (FC_FAILURE); 2078 } 2079 2080 bzero((void *)sbp, sizeof (emlxs_buf_t)); 2081 2082 mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg); 2083 sbp->pkt_flags = 2084 PACKET_VALID | PACKET_RETURNED; 2085 sbp->port = port; 2086 sbp->pkt = pkt; 2087 sbp->iocbq.sbp = sbp; 2088 2089 return (FC_SUCCESS); 2090 2091 } /* emlxs_pkt_init() */ 2092 2093 2094 2095 static void 2096 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp) 2097 { 2098 emlxs_hba_t *hba = HBA; 2099 emlxs_config_t *cfg = &CFG; 2100 fc_packet_t *pkt = PRIV2PKT(sbp); 2101 uint32_t *iptr; 2102 2103 mutex_enter(&sbp->mtx); 2104 2105 /* Reinitialize */ 2106 sbp->pkt = pkt; 2107 sbp->port = port; 2108 sbp->bmp = NULL; 2109 sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED); 2110 sbp->iotag = 0; 2111 sbp->ticks = 0; 2112 sbp->abort_attempts = 0; 2113 sbp->fpkt = NULL; 2114 sbp->flush_count = 0; 2115 sbp->next = NULL; 2116 2117 if (!port->tgt_mode) { 2118 sbp->node = NULL; 2119 sbp->did = 0; 2120 sbp->lun = 0; 2121 sbp->class = 0; 2122 sbp->ring = NULL; 2123 sbp->class = 0; 2124 } 2125 2126 bzero((void *)&sbp->iocbq, sizeof (IOCBQ)); 2127 sbp->iocbq.sbp = sbp; 2128 2129 if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp || 2130 ddi_in_panic()) { 2131 sbp->pkt_flags |= PACKET_POLLED; 2132 } 2133 2134 /* Prepare the fc packet */ 2135 pkt->pkt_state = FC_PKT_SUCCESS; 2136 pkt->pkt_reason = 0; 2137 pkt->pkt_action = 0; 2138 pkt->pkt_expln = 0; 2139 pkt->pkt_data_resid = 0; 2140 pkt->pkt_resp_resid = 0; 2141 2142 /* Make sure all pkt's have a proper timeout */ 2143 if (!cfg[CFG_TIMEOUT_ENABLE].current) { 2144 /* This disables all IOCB on chip timeouts */ 2145 pkt->pkt_timeout = 0x80000000; 2146 } else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) { 2147 pkt->pkt_timeout = 60; 2148 } 2149 2150 /* Clear the response buffer */ 2151 if (pkt->pkt_rsplen) { 2152 /* Check for FCP commands */ 2153 if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) || 2154 
(pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) { 2155 iptr = (uint32_t *)pkt->pkt_resp; 2156 iptr[2] = 0; 2157 iptr[3] = 0; 2158 } else { 2159 bzero(pkt->pkt_resp, pkt->pkt_rsplen); 2160 } 2161 } 2162 2163 mutex_exit(&sbp->mtx); 2164 2165 return; 2166 2167 } /* emlxs_initialize_pkt() */ 2168 2169 2170 2171 /* 2172 * We may not need this routine 2173 */ 2174 /*ARGSUSED*/ 2175 extern int 2176 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt) 2177 { 2178 emlxs_buf_t *sbp = PKT2PRIV(pkt); 2179 2180 if (!sbp) { 2181 return (FC_FAILURE); 2182 } 2183 2184 if (!(sbp->pkt_flags & PACKET_VALID)) { 2185 return (FC_FAILURE); 2186 } 2187 2188 sbp->pkt_flags &= ~PACKET_VALID; 2189 mutex_destroy(&sbp->mtx); 2190 2191 return (FC_SUCCESS); 2192 2193 } /* emlxs_pkt_uninit() */ 2194 2195 2196 static int 2197 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2198 { 2199 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2200 emlxs_hba_t *hba = HBA; 2201 int32_t rval; 2202 2203 if (!(port->flag & EMLXS_PORT_BOUND)) { 2204 return (FC_CAP_ERROR); 2205 } 2206 2207 if (strcmp(cap, FC_NODE_WWN) == 0) { 2208 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2209 "fca_get_cap: FC_NODE_WWN"); 2210 2211 bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE)); 2212 rval = FC_CAP_FOUND; 2213 2214 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) { 2215 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2216 "fca_get_cap: FC_LOGIN_PARAMS"); 2217 2218 /* 2219 * We need to turn off CLASS2 support. 2220 * Otherwise, FC transport will use CLASS2 as default class 2221 * and never try with CLASS3. 
2222 */ 2223 hba->sparam.cls2.classValid = 0; 2224 2225 bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM)); 2226 2227 rval = FC_CAP_FOUND; 2228 2229 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) { 2230 int32_t *num_bufs; 2231 emlxs_config_t *cfg = &CFG; 2232 2233 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2234 "fca_get_cap: FC_CAP_UNSOL_BUF (%d)", 2235 cfg[CFG_UB_BUFS].current); 2236 2237 num_bufs = (int32_t *)ptr; 2238 2239 /* We multiply by MAX_VPORTS because ULP uses a */ 2240 /* formula to calculate ub bufs from this */ 2241 *num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS); 2242 2243 rval = FC_CAP_FOUND; 2244 2245 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) { 2246 int32_t *size; 2247 2248 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2249 "fca_get_cap: FC_CAP_PAYLOAD_SIZE"); 2250 2251 size = (int32_t *)ptr; 2252 *size = -1; 2253 rval = FC_CAP_FOUND; 2254 2255 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) { 2256 fc_reset_action_t *action; 2257 2258 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2259 "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR"); 2260 2261 action = (fc_reset_action_t *)ptr; 2262 *action = FC_RESET_RETURN_ALL; 2263 rval = FC_CAP_FOUND; 2264 2265 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) { 2266 fc_dma_behavior_t *behavior; 2267 2268 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2269 "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF"); 2270 2271 behavior = (fc_dma_behavior_t *)ptr; 2272 *behavior = FC_ALLOW_STREAMING; 2273 rval = FC_CAP_FOUND; 2274 2275 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) { 2276 fc_fcp_dma_t *fcp_dma; 2277 2278 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2279 "fca_get_cap: FC_CAP_FCP_DMA"); 2280 2281 fcp_dma = (fc_fcp_dma_t *)ptr; 2282 *fcp_dma = FC_DVMA_SPACE; 2283 rval = FC_CAP_FOUND; 2284 2285 } else { 2286 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2287 "fca_get_cap: Unknown capability. 
[%s]", cap); 2288 2289 rval = FC_CAP_ERROR; 2290 2291 } 2292 2293 return (rval); 2294 2295 } /* emlxs_get_cap() */ 2296 2297 2298 2299 static int 2300 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2301 { 2302 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2303 2304 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2305 "fca_set_cap: cap=[%s] arg=%p", cap, ptr); 2306 2307 return (FC_CAP_ERROR); 2308 2309 } /* emlxs_set_cap() */ 2310 2311 2312 static opaque_t 2313 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id) 2314 { 2315 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2316 2317 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2318 "fca_get_device: did=%x", d_id); 2319 2320 return (NULL); 2321 2322 } /* emlxs_get_device() */ 2323 2324 2325 static int32_t 2326 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd) 2327 { 2328 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2329 2330 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x", 2331 cmd); 2332 2333 return (FC_SUCCESS); 2334 2335 } /* emlxs_notify */ 2336 2337 2338 2339 static int 2340 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf) 2341 { 2342 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2343 emlxs_hba_t *hba = HBA; 2344 uint32_t lilp_length; 2345 2346 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2347 "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf, 2348 port->alpa_map[0], port->alpa_map[1], port->alpa_map[2], 2349 port->alpa_map[3], port->alpa_map[4]); 2350 2351 if (!(port->flag & EMLXS_PORT_BOUND)) { 2352 return (FC_NOMAP); 2353 } 2354 2355 if (hba->topology != TOPOLOGY_LOOP) { 2356 return (FC_NOMAP); 2357 } 2358 2359 /* Check if alpa map is available */ 2360 if (port->alpa_map[0] != 0) { 2361 mapbuf->lilp_magic = MAGIC_LILP; 2362 } else { /* No LILP map available */ 2363 2364 /* Set lilp_magic to MAGIC_LISA and this will */ 2365 /* trigger an ALPA scan in ULP */ 2366 mapbuf->lilp_magic = MAGIC_LISA; 2367 } 2368 
2369 mapbuf->lilp_myalpa = port->did; 2370 2371 /* The first byte of the alpa_map is the lilp map length */ 2372 /* Add one to include the lilp length byte itself */ 2373 lilp_length = (uint32_t)port->alpa_map[0] + 1; 2374 2375 /* Make sure the max transfer is 128 bytes */ 2376 if (lilp_length > 128) { 2377 lilp_length = 128; 2378 } 2379 2380 /* We start copying from the lilp_length field */ 2381 /* in order to get a word aligned address */ 2382 bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length, 2383 lilp_length); 2384 2385 return (FC_SUCCESS); 2386 2387 } /* emlxs_get_map() */ 2388 2389 2390 2391 extern int 2392 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt) 2393 { 2394 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2395 emlxs_hba_t *hba = HBA; 2396 emlxs_buf_t *sbp; 2397 uint32_t rval; 2398 uint32_t pkt_flags; 2399 2400 /* Make sure adapter is online */ 2401 if (!(hba->flag & FC_ONLINE_MODE)) { 2402 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 2403 "Adapter offline."); 2404 2405 return (FC_OFFLINE); 2406 } 2407 2408 /* Validate packet */ 2409 sbp = PKT2PRIV(pkt); 2410 2411 /* Make sure ULP was told that the port was online */ 2412 if ((port->ulp_statec == FC_STATE_OFFLINE) && 2413 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2414 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 2415 "Port offline."); 2416 2417 return (FC_OFFLINE); 2418 } 2419 2420 if (sbp->port != port) { 2421 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2422 "Invalid port handle. sbp=%p port=%p flags=%x", sbp, 2423 sbp->port, sbp->pkt_flags); 2424 return (FC_BADPACKET); 2425 } 2426 2427 if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_RETURNED))) { 2428 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2429 "Invalid packet flags. 
sbp=%p port=%p flags=%x", sbp, 2430 sbp->port, sbp->pkt_flags); 2431 return (FC_BADPACKET); 2432 } 2433 #ifdef SFCT_SUPPORT 2434 if (port->tgt_mode && !sbp->fct_cmd && 2435 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2436 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2437 "Packet blocked. Target mode."); 2438 return (FC_TRANSPORT_ERROR); 2439 } 2440 #endif /* SFCT_SUPPORT */ 2441 2442 #ifdef IDLE_TIMER 2443 emlxs_pm_busy_component(hba); 2444 #endif /* IDLE_TIMER */ 2445 2446 /* Prepare the packet for transport */ 2447 emlxs_initialize_pkt(port, sbp); 2448 2449 /* Save a copy of the pkt flags. */ 2450 /* We will check the polling flag later */ 2451 pkt_flags = sbp->pkt_flags; 2452 2453 /* Send the packet */ 2454 switch (pkt->pkt_tran_type) { 2455 case FC_PKT_FCP_READ: 2456 case FC_PKT_FCP_WRITE: 2457 rval = emlxs_send_fcp_cmd(port, sbp); 2458 break; 2459 2460 case FC_PKT_IP_WRITE: 2461 case FC_PKT_BROADCAST: 2462 rval = emlxs_send_ip(port, sbp); 2463 break; 2464 2465 case FC_PKT_EXCHANGE: 2466 switch (pkt->pkt_cmd_fhdr.type) { 2467 case FC_TYPE_SCSI_FCP: 2468 rval = emlxs_send_fcp_cmd(port, sbp); 2469 break; 2470 2471 case FC_TYPE_FC_SERVICES: 2472 rval = emlxs_send_ct(port, sbp); 2473 break; 2474 2475 #ifdef MENLO_SUPPORT 2476 case EMLXS_MENLO_TYPE: 2477 rval = emlxs_send_menlo(port, sbp); 2478 break; 2479 #endif /* MENLO_SUPPORT */ 2480 2481 default: 2482 rval = emlxs_send_els(port, sbp); 2483 } 2484 break; 2485 2486 case FC_PKT_OUTBOUND: 2487 switch (pkt->pkt_cmd_fhdr.type) { 2488 #ifdef SFCT_SUPPORT 2489 case FC_TYPE_SCSI_FCP: 2490 rval = emlxs_send_fct_status(port, sbp); 2491 break; 2492 2493 case FC_TYPE_BASIC_LS: 2494 rval = emlxs_send_fct_abort(port, sbp); 2495 break; 2496 #endif /* SFCT_SUPPORT */ 2497 2498 case FC_TYPE_FC_SERVICES: 2499 rval = emlxs_send_ct_rsp(port, sbp); 2500 break; 2501 #ifdef MENLO_SUPPORT 2502 case EMLXS_MENLO_TYPE: 2503 rval = emlxs_send_menlo(port, sbp); 2504 break; 2505 #endif /* MENLO_SUPPORT */ 2506 2507 default: 2508 rval = 
emlxs_send_els_rsp(port, sbp); 2509 } 2510 break; 2511 2512 default: 2513 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2514 "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type); 2515 rval = FC_TRANSPORT_ERROR; 2516 break; 2517 } 2518 2519 /* Check if send was not successful */ 2520 if (rval != FC_SUCCESS) { 2521 /* Return packet to ULP */ 2522 mutex_enter(&sbp->mtx); 2523 sbp->pkt_flags |= PACKET_RETURNED; 2524 mutex_exit(&sbp->mtx); 2525 2526 return (rval); 2527 } 2528 2529 /* Check if this packet should be polled for completion before */ 2530 /* returning. This check must be done with a saved copy of the */ 2531 /* pkt_flags because the packet itself could already be freed from */ 2532 /* memory if it was not polled. */ 2533 if (pkt_flags & PACKET_POLLED) { 2534 emlxs_poll(port, sbp); 2535 } 2536 2537 return (FC_SUCCESS); 2538 2539 } /* emlxs_transport() */ 2540 2541 2542 2543 static void 2544 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp) 2545 { 2546 emlxs_hba_t *hba = HBA; 2547 fc_packet_t *pkt = PRIV2PKT(sbp); 2548 clock_t timeout; 2549 clock_t time; 2550 uint32_t att_bit; 2551 emlxs_ring_t *rp; 2552 2553 mutex_enter(&EMLXS_PORT_LOCK); 2554 hba->io_poll_count++; 2555 mutex_exit(&EMLXS_PORT_LOCK); 2556 2557 /* Check for panic situation */ 2558 if (ddi_in_panic()) { 2559 /* 2560 * In panic situations there will be one thread with 2561 * no interrrupts (hard or soft) and no timers 2562 */ 2563 2564 /* 2565 * We must manually poll everything in this thread 2566 * to keep the driver going. 2567 */ 2568 rp = (emlxs_ring_t *)sbp->ring; 2569 switch (rp->ringno) { 2570 case FC_FCP_RING: 2571 att_bit = HA_R0ATT; 2572 break; 2573 2574 case FC_IP_RING: 2575 att_bit = HA_R1ATT; 2576 break; 2577 2578 case FC_ELS_RING: 2579 att_bit = HA_R2ATT; 2580 break; 2581 2582 case FC_CT_RING: 2583 att_bit = HA_R3ATT; 2584 break; 2585 } 2586 2587 /* Keep polling the chip until our IO is completed */ 2588 /* Driver's timer will not function during panics. 
*/ 2589 /* Therefore, timer checks must be performed manually. */ 2590 (void) drv_getparm(LBOLT, &time); 2591 timeout = time + drv_usectohz(1000000); 2592 while (!(sbp->pkt_flags & PACKET_COMPLETED)) { 2593 emlxs_sli_poll_intr(hba, att_bit); 2594 (void) drv_getparm(LBOLT, &time); 2595 2596 /* Trigger timer checks periodically */ 2597 if (time >= timeout) { 2598 emlxs_timer_checks(hba); 2599 timeout = time + drv_usectohz(1000000); 2600 } 2601 } 2602 } else { 2603 /* Wait for IO completion */ 2604 /* The driver's timer will detect */ 2605 /* any timeout and abort the I/O. */ 2606 mutex_enter(&EMLXS_PKT_LOCK); 2607 while (!(sbp->pkt_flags & PACKET_COMPLETED)) { 2608 cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK); 2609 } 2610 mutex_exit(&EMLXS_PKT_LOCK); 2611 } 2612 2613 /* Check for fcp reset pkt */ 2614 if (sbp->pkt_flags & PACKET_FCP_RESET) { 2615 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) { 2616 /* Flush the IO's on the chipq */ 2617 (void) emlxs_chipq_node_flush(port, 2618 &hba->ring[FC_FCP_RING], sbp->node, sbp); 2619 } else { 2620 /* Flush the IO's on the chipq for this lun */ 2621 (void) emlxs_chipq_lun_flush(port, 2622 sbp->node, sbp->lun, sbp); 2623 } 2624 2625 if (sbp->flush_count == 0) { 2626 emlxs_node_open(port, sbp->node, FC_FCP_RING); 2627 goto done; 2628 } 2629 2630 /* Set the timeout so the flush has time to complete */ 2631 timeout = emlxs_timeout(hba, 60); 2632 (void) drv_getparm(LBOLT, &time); 2633 while ((time < timeout) && sbp->flush_count > 0) { 2634 delay(drv_usectohz(500000)); 2635 (void) drv_getparm(LBOLT, &time); 2636 } 2637 2638 if (sbp->flush_count == 0) { 2639 emlxs_node_open(port, sbp->node, FC_FCP_RING); 2640 goto done; 2641 } 2642 2643 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2644 "sbp=%p flush_count=%d. 
Waiting...", sbp, 2645 sbp->flush_count); 2646 2647 /* Let's try this one more time */ 2648 2649 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) { 2650 /* Flush the IO's on the chipq */ 2651 (void) emlxs_chipq_node_flush(port, 2652 &hba->ring[FC_FCP_RING], sbp->node, sbp); 2653 } else { 2654 /* Flush the IO's on the chipq for this lun */ 2655 (void) emlxs_chipq_lun_flush(port, 2656 sbp->node, sbp->lun, sbp); 2657 } 2658 2659 /* Reset the timeout so the flush has time to complete */ 2660 timeout = emlxs_timeout(hba, 60); 2661 (void) drv_getparm(LBOLT, &time); 2662 while ((time < timeout) && sbp->flush_count > 0) { 2663 delay(drv_usectohz(500000)); 2664 (void) drv_getparm(LBOLT, &time); 2665 } 2666 2667 if (sbp->flush_count == 0) { 2668 emlxs_node_open(port, sbp->node, FC_FCP_RING); 2669 goto done; 2670 } 2671 2672 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2673 "sbp=%p flush_count=%d. Resetting link.", sbp, 2674 sbp->flush_count); 2675 2676 /* Let's first try to reset the link */ 2677 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 2678 2679 if (sbp->flush_count == 0) { 2680 goto done; 2681 } 2682 2683 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2684 "sbp=%p flush_count=%d. Resetting HBA.", sbp, 2685 sbp->flush_count); 2686 2687 /* If that doesn't work, reset the adapter */ 2688 (void) emlxs_reset(port, FC_FCA_RESET); 2689 2690 if (sbp->flush_count != 0) { 2691 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2692 "sbp=%p flush_count=%d. 
Giving up.", sbp, 2693 sbp->flush_count); 2694 } 2695 2696 } 2697 /* PACKET_FCP_RESET */ 2698 done: 2699 2700 /* Packet has been declared completed and is now ready to be returned */ 2701 2702 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 2703 emlxs_unswap_pkt(sbp); 2704 #endif /* EMLXS_MODREV2X */ 2705 2706 mutex_enter(&sbp->mtx); 2707 sbp->pkt_flags |= PACKET_RETURNED; 2708 mutex_exit(&sbp->mtx); 2709 2710 mutex_enter(&EMLXS_PORT_LOCK); 2711 hba->io_poll_count--; 2712 mutex_exit(&EMLXS_PORT_LOCK); 2713 2714 /* Make ULP completion callback if required */ 2715 if (pkt->pkt_comp) { 2716 (*pkt->pkt_comp) (pkt); 2717 } 2718 2719 return; 2720 2721 } /* emlxs_poll() */ 2722 2723 2724 static int 2725 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size, 2726 uint32_t *count, uint32_t type) 2727 { 2728 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2729 emlxs_hba_t *hba = HBA; 2730 2731 char *err = NULL; 2732 emlxs_unsol_buf_t *pool; 2733 emlxs_unsol_buf_t *new_pool; 2734 int32_t i; 2735 int result; 2736 uint32_t free_resv; 2737 uint32_t free; 2738 emlxs_config_t *cfg = &CFG; 2739 fc_unsol_buf_t *ubp; 2740 emlxs_ub_priv_t *ub_priv; 2741 2742 if (port->tgt_mode) { 2743 if (tokens && count) { 2744 bzero(tokens, (sizeof (uint64_t) * (*count))); 2745 } 2746 return (FC_SUCCESS); 2747 } 2748 2749 if (!(port->flag & EMLXS_PORT_BOUND)) { 2750 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2751 "ub_alloc failed: Port not bound! size=%x count=%d " 2752 "type=%x", size, *count, type); 2753 2754 return (FC_FAILURE); 2755 } 2756 2757 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2758 "ub_alloc: size=%x count=%d type=%x", size, *count, type); 2759 2760 if (count && (*count > EMLXS_MAX_UBUFS)) { 2761 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2762 "ub_alloc failed: Too many unsolicted buffers requested. 
" 2763 "count=%x", *count); 2764 2765 return (FC_FAILURE); 2766 2767 } 2768 2769 if (tokens == NULL) { 2770 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2771 "ub_alloc failed: Token array is NULL."); 2772 2773 return (FC_FAILURE); 2774 } 2775 2776 /* Clear the token array */ 2777 bzero(tokens, (sizeof (uint64_t) * (*count))); 2778 2779 free_resv = 0; 2780 free = *count; 2781 switch (type) { 2782 case FC_TYPE_BASIC_LS: 2783 err = "BASIC_LS"; 2784 break; 2785 case FC_TYPE_EXTENDED_LS: 2786 err = "EXTENDED_LS"; 2787 free = *count / 2; /* Hold 50% for normal use */ 2788 free_resv = *count - free; /* Reserve 50% for RSCN use */ 2789 break; 2790 case FC_TYPE_IS8802: 2791 err = "IS8802"; 2792 break; 2793 case FC_TYPE_IS8802_SNAP: 2794 err = "IS8802_SNAP"; 2795 2796 if (cfg[CFG_NETWORK_ON].current == 0) { 2797 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2798 "ub_alloc failed: IP support is disabled."); 2799 2800 return (FC_FAILURE); 2801 } 2802 break; 2803 case FC_TYPE_SCSI_FCP: 2804 err = "SCSI_FCP"; 2805 break; 2806 case FC_TYPE_SCSI_GPP: 2807 err = "SCSI_GPP"; 2808 break; 2809 case FC_TYPE_HIPP_FP: 2810 err = "HIPP_FP"; 2811 break; 2812 case FC_TYPE_IPI3_MASTER: 2813 err = "IPI3_MASTER"; 2814 break; 2815 case FC_TYPE_IPI3_SLAVE: 2816 err = "IPI3_SLAVE"; 2817 break; 2818 case FC_TYPE_IPI3_PEER: 2819 err = "IPI3_PEER"; 2820 break; 2821 case FC_TYPE_FC_SERVICES: 2822 err = "FC_SERVICES"; 2823 break; 2824 } 2825 2826 mutex_enter(&EMLXS_UB_LOCK); 2827 2828 /* 2829 * Walk through the list of the unsolicited buffers 2830 * for this ddiinst of emlx. 2831 */ 2832 2833 pool = port->ub_pool; 2834 2835 /* 2836 * The emlxs_ub_alloc() can be called more than once with different 2837 * size. We will reject the call if there are 2838 * duplicate size with the same FC-4 type. 
2839 */ 2840 while (pool) { 2841 if ((pool->pool_type == type) && 2842 (pool->pool_buf_size == size)) { 2843 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2844 "ub_alloc failed: Unsolicited buffer pool for %s " 2845 "of size 0x%x bytes already exists.", err, size); 2846 2847 result = FC_FAILURE; 2848 goto fail; 2849 } 2850 2851 pool = pool->pool_next; 2852 } 2853 2854 new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t), 2855 KM_SLEEP); 2856 if (new_pool == NULL) { 2857 result = FC_FAILURE; 2858 goto fail; 2859 } 2860 2861 new_pool->pool_next = NULL; 2862 new_pool->pool_type = type; 2863 new_pool->pool_buf_size = size; 2864 new_pool->pool_nentries = *count; 2865 new_pool->pool_available = new_pool->pool_nentries; 2866 new_pool->pool_free = free; 2867 new_pool->pool_free_resv = free_resv; 2868 new_pool->fc_ubufs = 2869 kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP); 2870 2871 if (new_pool->fc_ubufs == NULL) { 2872 kmem_free(new_pool, sizeof (emlxs_unsol_buf_t)); 2873 result = FC_FAILURE; 2874 goto fail; 2875 } 2876 2877 new_pool->pool_first_token = port->ub_count; 2878 new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries; 2879 2880 for (i = 0; i < new_pool->pool_nentries; i++) { 2881 ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i]; 2882 ubp->ub_port_handle = port->ulp_handle; 2883 ubp->ub_token = (uint64_t)((unsigned long)ubp); 2884 ubp->ub_bufsize = size; 2885 ubp->ub_class = FC_TRAN_CLASS3; 2886 ubp->ub_port_private = NULL; 2887 ubp->ub_fca_private = 2888 (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t), 2889 KM_SLEEP); 2890 2891 if (ubp->ub_fca_private == NULL) { 2892 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2893 "ub_alloc failed: Unable to allocate fca_private " 2894 "object."); 2895 2896 result = FC_FAILURE; 2897 goto fail; 2898 } 2899 2900 /* 2901 * Initialize emlxs_ub_priv_t 2902 */ 2903 ub_priv = ubp->ub_fca_private; 2904 ub_priv->ubp = ubp; 2905 ub_priv->port = port; 2906 ub_priv->flags = 
EMLXS_UB_FREE; 2907 ub_priv->available = 1; 2908 ub_priv->pool = new_pool; 2909 ub_priv->time = 0; 2910 ub_priv->timeout = 0; 2911 ub_priv->token = port->ub_count; 2912 ub_priv->cmd = 0; 2913 2914 /* Allocate the actual buffer */ 2915 ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP); 2916 2917 /* Check if we were not successful */ 2918 if (ubp->ub_buffer == NULL) { 2919 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2920 "ub_alloc failed: Unable to allocate buffer."); 2921 2922 /* Free the private area of the current object */ 2923 kmem_free(ubp->ub_fca_private, 2924 sizeof (emlxs_ub_priv_t)); 2925 2926 result = FC_FAILURE; 2927 goto fail; 2928 } 2929 2930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 2931 "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp, 2932 ub_priv->token, ubp->ub_bufsize, type); 2933 2934 tokens[i] = (uint64_t)((unsigned long)ubp); 2935 port->ub_count++; 2936 } 2937 2938 /* Add the pool to the top of the pool list */ 2939 new_pool->pool_prev = NULL; 2940 new_pool->pool_next = port->ub_pool; 2941 2942 if (port->ub_pool) { 2943 port->ub_pool->pool_prev = new_pool; 2944 } 2945 port->ub_pool = new_pool; 2946 2947 /* Set the post counts */ 2948 if (type == FC_TYPE_IS8802_SNAP) { 2949 MAILBOXQ *mbox; 2950 2951 port->ub_post[FC_IP_RING] += new_pool->pool_nentries; 2952 2953 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, 2954 MEM_MBOX | MEM_PRI))) { 2955 emlxs_mb_config_farp(hba, (MAILBOX *)mbox); 2956 if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mbox, 2957 MBX_NOWAIT, 0) != MBX_BUSY) { 2958 (void) emlxs_mem_put(hba, MEM_MBOX, 2959 (uint8_t *)mbox); 2960 } 2961 } 2962 port->flag |= EMLXS_PORT_IP_UP; 2963 } else if (type == FC_TYPE_EXTENDED_LS) { 2964 port->ub_post[FC_ELS_RING] += new_pool->pool_nentries; 2965 } else if (type == FC_TYPE_FC_SERVICES) { 2966 port->ub_post[FC_CT_RING] += new_pool->pool_nentries; 2967 } 2968 2969 mutex_exit(&EMLXS_UB_LOCK); 2970 2971 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2972 "%d unsolicited buffers 
allocated for %s of size 0x%x bytes.", 2973 *count, err, size); 2974 2975 return (FC_SUCCESS); 2976 2977 fail: 2978 2979 /* Clean the pool */ 2980 for (i = 0; tokens[i] != NULL; i++) { 2981 /* Get the buffer object */ 2982 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 2983 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 2984 2985 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 2986 "ub_alloc failed: Freed buffer=%p token=%x size=%x " 2987 "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type); 2988 2989 /* Free the actual buffer */ 2990 kmem_free(ubp->ub_buffer, ubp->ub_bufsize); 2991 2992 /* Free the private area of the buffer object */ 2993 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t)); 2994 2995 tokens[i] = 0; 2996 port->ub_count--; 2997 } 2998 2999 /* Free the array of buffer objects in the pool */ 3000 kmem_free((caddr_t)new_pool->fc_ubufs, 3001 (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries)); 3002 3003 /* Free the pool object */ 3004 kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t)); 3005 3006 mutex_exit(&EMLXS_UB_LOCK); 3007 3008 return (result); 3009 3010 } /* emlxs_ub_alloc() */ 3011 3012 3013 static void 3014 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp) 3015 { 3016 emlxs_hba_t *hba = HBA; 3017 emlxs_ub_priv_t *ub_priv; 3018 fc_packet_t *pkt; 3019 ELS_PKT *els; 3020 uint32_t sid; 3021 3022 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3023 3024 if (hba->state <= FC_LINK_DOWN) { 3025 return; 3026 } 3027 3028 if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) + 3029 sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) { 3030 return; 3031 } 3032 3033 sid = SWAP_DATA24_LO(ubp->ub_frame.s_id); 3034 3035 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg, 3036 "%s dropped: sid=%x. 
Rejecting.", 3037 emlxs_elscmd_xlate(ub_priv->cmd), sid); 3038 3039 pkt->pkt_tran_type = FC_PKT_OUTBOUND; 3040 pkt->pkt_timeout = (2 * hba->fc_ratov); 3041 3042 if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) { 3043 pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3; 3044 pkt->pkt_tran_flags |= FC_TRAN_CLASS2; 3045 } 3046 3047 /* Build the fc header */ 3048 pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id; 3049 pkt->pkt_cmd_fhdr.r_ctl = 3050 R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL; 3051 pkt->pkt_cmd_fhdr.s_id = SWAP_DATA24_LO(port->did); 3052 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 3053 pkt->pkt_cmd_fhdr.f_ctl = 3054 F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ; 3055 pkt->pkt_cmd_fhdr.seq_id = 0; 3056 pkt->pkt_cmd_fhdr.df_ctl = 0; 3057 pkt->pkt_cmd_fhdr.seq_cnt = 0; 3058 pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff; 3059 pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id; 3060 pkt->pkt_cmd_fhdr.ro = 0; 3061 3062 /* Build the command */ 3063 els = (ELS_PKT *) pkt->pkt_cmd; 3064 els->elsCode = 0x01; 3065 els->un.lsRjt.un.b.lsRjtRsvd0 = 0; 3066 els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3067 els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 3068 els->un.lsRjt.un.b.vendorUnique = 0x02; 3069 3070 /* Send the pkt later in another thread */ 3071 (void) emlxs_pkt_send(pkt, 0); 3072 3073 return; 3074 3075 } /* emlxs_ub_els_reject() */ 3076 3077 extern int 3078 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[]) 3079 { 3080 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3081 emlxs_hba_t *hba = HBA; 3082 fc_unsol_buf_t *ubp; 3083 emlxs_ub_priv_t *ub_priv; 3084 uint32_t i; 3085 uint32_t time; 3086 emlxs_unsol_buf_t *pool; 3087 3088 if (count == 0) { 3089 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3090 "ub_release: Nothing to do. 
count=%d", count); 3091 3092 return (FC_SUCCESS); 3093 } 3094 3095 if (!(port->flag & EMLXS_PORT_BOUND)) { 3096 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3097 "ub_release failed: Port not bound. count=%d token[0]=%p", 3098 count, tokens[0]); 3099 3100 return (FC_UNBOUND); 3101 } 3102 3103 mutex_enter(&EMLXS_UB_LOCK); 3104 3105 if (!port->ub_pool) { 3106 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3107 "ub_release failed: No pools! count=%d token[0]=%p", 3108 count, tokens[0]); 3109 3110 mutex_exit(&EMLXS_UB_LOCK); 3111 return (FC_UB_BADTOKEN); 3112 } 3113 3114 for (i = 0; i < count; i++) { 3115 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3116 3117 if (!ubp) { 3118 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3119 "ub_release failed: count=%d tokens[%d]=0", count, 3120 i); 3121 3122 mutex_exit(&EMLXS_UB_LOCK); 3123 return (FC_UB_BADTOKEN); 3124 } 3125 3126 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3127 3128 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) { 3129 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3130 "ub_release failed: Dead buffer found. ubp=%p", 3131 ubp); 3132 3133 mutex_exit(&EMLXS_UB_LOCK); 3134 return (FC_UB_BADTOKEN); 3135 } 3136 3137 if (ub_priv->flags == EMLXS_UB_FREE) { 3138 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3139 "ub_release: Buffer already free! 
ubp=%p token=%x", 3140 ubp, ub_priv->token); 3141 3142 continue; 3143 } 3144 3145 /* Check for dropped els buffer */ 3146 /* ULP will do this sometimes without sending a reply */ 3147 if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) && 3148 !(ub_priv->flags & EMLXS_UB_REPLY)) { 3149 emlxs_ub_els_reject(port, ubp); 3150 } 3151 3152 /* Mark the buffer free */ 3153 ub_priv->flags = EMLXS_UB_FREE; 3154 bzero(ubp->ub_buffer, ubp->ub_bufsize); 3155 3156 time = hba->timer_tics - ub_priv->time; 3157 ub_priv->time = 0; 3158 ub_priv->timeout = 0; 3159 3160 pool = ub_priv->pool; 3161 3162 if (ub_priv->flags & EMLXS_UB_RESV) { 3163 pool->pool_free_resv++; 3164 } else { 3165 pool->pool_free++; 3166 } 3167 3168 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3169 "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)", 3170 ubp, ub_priv->token, time, ub_priv->available, 3171 pool->pool_nentries, pool->pool_available, 3172 pool->pool_free, pool->pool_free_resv); 3173 3174 /* Check if pool can be destroyed now */ 3175 if ((pool->pool_available == 0) && 3176 (pool->pool_free + pool->pool_free_resv == 3177 pool->pool_nentries)) { 3178 emlxs_ub_destroy(port, pool); 3179 } 3180 } 3181 3182 mutex_exit(&EMLXS_UB_LOCK); 3183 3184 return (FC_SUCCESS); 3185 3186 } /* emlxs_ub_release() */ 3187 3188 3189 static int 3190 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[]) 3191 { 3192 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3193 emlxs_unsol_buf_t *pool; 3194 fc_unsol_buf_t *ubp; 3195 emlxs_ub_priv_t *ub_priv; 3196 uint32_t i; 3197 3198 if (port->tgt_mode) { 3199 return (FC_SUCCESS); 3200 } 3201 3202 if (count == 0) { 3203 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3204 "ub_free: Nothing to do. count=%d token[0]=%p", count, 3205 tokens[0]); 3206 3207 return (FC_SUCCESS); 3208 } 3209 3210 if (!(port->flag & EMLXS_PORT_BOUND)) { 3211 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3212 "ub_free: Port not bound. 
count=%d token[0]=%p", count, 3213 tokens[0]); 3214 3215 return (FC_SUCCESS); 3216 } 3217 3218 mutex_enter(&EMLXS_UB_LOCK); 3219 3220 if (!port->ub_pool) { 3221 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3222 "ub_free failed: No pools! count=%d token[0]=%p", count, 3223 tokens[0]); 3224 3225 mutex_exit(&EMLXS_UB_LOCK); 3226 return (FC_UB_BADTOKEN); 3227 } 3228 3229 /* Process buffer list */ 3230 for (i = 0; i < count; i++) { 3231 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3232 3233 if (!ubp) { 3234 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3235 "ub_free failed: count=%d tokens[%d]=0", count, 3236 i); 3237 3238 mutex_exit(&EMLXS_UB_LOCK); 3239 return (FC_UB_BADTOKEN); 3240 } 3241 3242 /* Mark buffer unavailable */ 3243 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3244 3245 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) { 3246 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3247 "ub_free failed: Dead buffer found. ubp=%p", ubp); 3248 3249 mutex_exit(&EMLXS_UB_LOCK); 3250 return (FC_UB_BADTOKEN); 3251 } 3252 3253 ub_priv->available = 0; 3254 3255 /* Mark one less buffer available in the parent pool */ 3256 pool = ub_priv->pool; 3257 3258 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3259 "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp, 3260 ub_priv->token, pool->pool_nentries, 3261 pool->pool_available - 1, pool->pool_free, 3262 pool->pool_free_resv); 3263 3264 if (pool->pool_available) { 3265 pool->pool_available--; 3266 3267 /* Check if pool can be destroyed */ 3268 if ((pool->pool_available == 0) && 3269 (pool->pool_free + pool->pool_free_resv == 3270 pool->pool_nentries)) { 3271 emlxs_ub_destroy(port, pool); 3272 } 3273 } 3274 } 3275 3276 mutex_exit(&EMLXS_UB_LOCK); 3277 3278 return (FC_SUCCESS); 3279 3280 } /* emlxs_ub_free() */ 3281 3282 3283 /* EMLXS_UB_LOCK must be held when calling this routine */ 3284 extern void 3285 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool) 3286 { 3287 emlxs_unsol_buf_t *next; 3288 
emlxs_unsol_buf_t *prev; 3289 fc_unsol_buf_t *ubp; 3290 uint32_t i; 3291 3292 /* Remove the pool object from the pool list */ 3293 next = pool->pool_next; 3294 prev = pool->pool_prev; 3295 3296 if (port->ub_pool == pool) { 3297 port->ub_pool = next; 3298 } 3299 3300 if (prev) { 3301 prev->pool_next = next; 3302 } 3303 3304 if (next) { 3305 next->pool_prev = prev; 3306 } 3307 3308 pool->pool_prev = NULL; 3309 pool->pool_next = NULL; 3310 3311 /* Clear the post counts */ 3312 switch (pool->pool_type) { 3313 case FC_TYPE_IS8802_SNAP: 3314 port->ub_post[FC_IP_RING] -= pool->pool_nentries; 3315 break; 3316 3317 case FC_TYPE_EXTENDED_LS: 3318 port->ub_post[FC_ELS_RING] -= pool->pool_nentries; 3319 break; 3320 3321 case FC_TYPE_FC_SERVICES: 3322 port->ub_post[FC_CT_RING] -= pool->pool_nentries; 3323 break; 3324 } 3325 3326 /* Now free the pool memory */ 3327 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3328 "ub_destroy: pool=%p type=%d size=%d count=%d", pool, 3329 pool->pool_type, pool->pool_buf_size, pool->pool_nentries); 3330 3331 /* Process the array of buffer objects in the pool */ 3332 for (i = 0; i < pool->pool_nentries; i++) { 3333 /* Get the buffer object */ 3334 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i]; 3335 3336 /* Free the memory the buffer object represents */ 3337 kmem_free(ubp->ub_buffer, ubp->ub_bufsize); 3338 3339 /* Free the private area of the buffer object */ 3340 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t)); 3341 } 3342 3343 /* Free the array of buffer objects in the pool */ 3344 kmem_free((caddr_t)pool->fc_ubufs, 3345 (sizeof (fc_unsol_buf_t)*pool->pool_nentries)); 3346 3347 /* Free the pool object */ 3348 kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t)); 3349 3350 return; 3351 3352 } /* emlxs_ub_destroy() */ 3353 3354 3355 /*ARGSUSED*/ 3356 extern int 3357 emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep) 3358 { 3359 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3360 emlxs_hba_t *hba = HBA; 3361 
3362 emlxs_buf_t *sbp; 3363 NODELIST *nlp; 3364 NODELIST *prev_nlp; 3365 uint8_t ringno; 3366 RING *rp; 3367 clock_t timeout; 3368 clock_t time; 3369 int32_t pkt_ret; 3370 IOCBQ *iocbq; 3371 IOCBQ *next; 3372 IOCBQ *prev; 3373 uint32_t found; 3374 uint32_t att_bit; 3375 uint32_t pass = 0; 3376 3377 sbp = (emlxs_buf_t *)pkt->pkt_fca_private; 3378 iocbq = &sbp->iocbq; 3379 nlp = (NODELIST *)sbp->node; 3380 rp = (RING *)sbp->ring; 3381 ringno = (rp) ? rp->ringno : 0; 3382 3383 if (!(port->flag & EMLXS_PORT_BOUND)) { 3384 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3385 "Port not bound."); 3386 return (FC_UNBOUND); 3387 } 3388 3389 if (!(hba->flag & FC_ONLINE_MODE)) { 3390 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3391 "Adapter offline."); 3392 return (FC_OFFLINE); 3393 } 3394 3395 /* ULP requires the aborted pkt to be completed */ 3396 /* back to ULP before returning from this call. */ 3397 /* SUN knows of problems with this call so they suggested that we */ 3398 /* always return a FC_FAILURE for this call, until it is worked out. */ 3399 3400 /* Check if pkt is no good */ 3401 if (!(sbp->pkt_flags & PACKET_VALID) || 3402 (sbp->pkt_flags & PACKET_RETURNED)) { 3403 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3404 "Bad sbp. flags=%x", sbp->pkt_flags); 3405 return (FC_FAILURE); 3406 } 3407 3408 /* Tag this now */ 3409 /* This will prevent any thread except ours from completing it */ 3410 mutex_enter(&sbp->mtx); 3411 3412 /* Check again if we still own this */ 3413 if (!(sbp->pkt_flags & PACKET_VALID) || 3414 (sbp->pkt_flags & PACKET_RETURNED)) { 3415 mutex_exit(&sbp->mtx); 3416 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3417 "Bad sbp. 
flags=%x", sbp->pkt_flags); 3418 return (FC_FAILURE); 3419 } 3420 3421 /* Check if pkt is a real polled command */ 3422 if (!(sbp->pkt_flags & PACKET_IN_ABORT) && 3423 (sbp->pkt_flags & PACKET_POLLED)) { 3424 mutex_exit(&sbp->mtx); 3425 3426 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3427 "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp, 3428 sbp->pkt_flags); 3429 return (FC_FAILURE); 3430 } 3431 3432 sbp->pkt_flags |= PACKET_POLLED; 3433 sbp->pkt_flags |= PACKET_IN_ABORT; 3434 3435 if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH | 3436 PACKET_IN_TIMEOUT)) { 3437 mutex_exit(&sbp->mtx); 3438 3439 /* Do nothing, pkt already on its way out */ 3440 goto done; 3441 } 3442 3443 mutex_exit(&sbp->mtx); 3444 3445 begin: 3446 pass++; 3447 3448 mutex_enter(&EMLXS_RINGTX_LOCK); 3449 3450 if (sbp->pkt_flags & PACKET_IN_TXQ) { 3451 /* Find it on the queue */ 3452 found = 0; 3453 if (iocbq->flag & IOCB_PRIORITY) { 3454 /* Search the priority queue */ 3455 prev = NULL; 3456 next = (IOCBQ *) nlp->nlp_ptx[ringno].q_first; 3457 3458 while (next) { 3459 if (next == iocbq) { 3460 /* Remove it */ 3461 if (prev) { 3462 prev->next = iocbq->next; 3463 } 3464 3465 if (nlp->nlp_ptx[ringno].q_last == 3466 (void *)iocbq) { 3467 nlp->nlp_ptx[ringno].q_last = 3468 (void *)prev; 3469 } 3470 3471 if (nlp->nlp_ptx[ringno].q_first == 3472 (void *)iocbq) { 3473 nlp->nlp_ptx[ringno].q_first = 3474 (void *)iocbq->next; 3475 } 3476 3477 nlp->nlp_ptx[ringno].q_cnt--; 3478 iocbq->next = NULL; 3479 found = 1; 3480 break; 3481 } 3482 3483 prev = next; 3484 next = next->next; 3485 } 3486 } else { 3487 /* Search the normal queue */ 3488 prev = NULL; 3489 next = (IOCBQ *) nlp->nlp_tx[ringno].q_first; 3490 3491 while (next) { 3492 if (next == iocbq) { 3493 /* Remove it */ 3494 if (prev) { 3495 prev->next = iocbq->next; 3496 } 3497 3498 if (nlp->nlp_tx[ringno].q_last == 3499 (void *)iocbq) { 3500 nlp->nlp_tx[ringno].q_last = 3501 (void *)prev; 3502 } 3503 3504 if 
(nlp->nlp_tx[ringno].q_first == 3505 (void *)iocbq) { 3506 nlp->nlp_tx[ringno].q_first = 3507 (void *)iocbq->next; 3508 } 3509 3510 nlp->nlp_tx[ringno].q_cnt--; 3511 iocbq->next = NULL; 3512 found = 1; 3513 break; 3514 } 3515 3516 prev = next; 3517 next = (IOCBQ *) next->next; 3518 } 3519 } 3520 3521 if (!found) { 3522 mutex_exit(&EMLXS_RINGTX_LOCK); 3523 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3524 "I/O not found in driver. sbp=%p flags=%x", sbp, 3525 sbp->pkt_flags); 3526 goto done; 3527 } 3528 3529 /* Check if node still needs servicing */ 3530 if ((nlp->nlp_ptx[ringno].q_first) || 3531 (nlp->nlp_tx[ringno].q_first && 3532 !(nlp->nlp_flag[ringno] & NLP_CLOSED))) { 3533 3534 /* 3535 * If this is the base node, 3536 * then don't shift the pointers 3537 */ 3538 /* We want to drain the base node before moving on */ 3539 if (!nlp->nlp_base) { 3540 /* Just shift ring queue */ 3541 /* pointers to next node */ 3542 rp->nodeq.q_last = (void *) nlp; 3543 rp->nodeq.q_first = nlp->nlp_next[ringno]; 3544 } 3545 } else { 3546 /* Remove node from ring queue */ 3547 3548 /* If this is the only node on list */ 3549 if (rp->nodeq.q_first == (void *)nlp && 3550 rp->nodeq.q_last == (void *)nlp) { 3551 rp->nodeq.q_last = NULL; 3552 rp->nodeq.q_first = NULL; 3553 rp->nodeq.q_cnt = 0; 3554 } else if (rp->nodeq.q_first == (void *)nlp) { 3555 rp->nodeq.q_first = nlp->nlp_next[ringno]; 3556 ((NODELIST *) rp->nodeq.q_last)-> 3557 nlp_next[ringno] = rp->nodeq.q_first; 3558 rp->nodeq.q_cnt--; 3559 } else { 3560 /* 3561 * This is a little more difficult find the 3562 * previous node in the circular ring queue 3563 */ 3564 prev_nlp = nlp; 3565 while (prev_nlp->nlp_next[ringno] != nlp) { 3566 prev_nlp = prev_nlp->nlp_next[ringno]; 3567 } 3568 3569 prev_nlp->nlp_next[ringno] = 3570 nlp->nlp_next[ringno]; 3571 3572 if (rp->nodeq.q_last == (void *)nlp) { 3573 rp->nodeq.q_last = (void *)prev_nlp; 3574 } 3575 rp->nodeq.q_cnt--; 3576 3577 } 3578 3579 /* Clear node */ 3580 
nlp->nlp_next[ringno] = NULL; 3581 } 3582 3583 mutex_enter(&sbp->mtx); 3584 3585 if (sbp->pkt_flags & PACKET_IN_TXQ) { 3586 sbp->pkt_flags &= ~PACKET_IN_TXQ; 3587 hba->ring_tx_count[ringno]--; 3588 } 3589 3590 mutex_exit(&sbp->mtx); 3591 3592 /* Free the ulpIoTag and the bmp */ 3593 (void) emlxs_unregister_pkt(rp, sbp->iotag, 0); 3594 3595 mutex_exit(&EMLXS_RINGTX_LOCK); 3596 3597 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 3598 IOERR_ABORT_REQUESTED, 1); 3599 3600 goto done; 3601 } 3602 3603 mutex_exit(&EMLXS_RINGTX_LOCK); 3604 3605 3606 /* Check the chip queue */ 3607 mutex_enter(&EMLXS_FCTAB_LOCK(ringno)); 3608 3609 if ((sbp->pkt_flags & PACKET_IN_CHIPQ) && 3610 !(sbp->pkt_flags & PACKET_XRI_CLOSED) && 3611 (sbp == rp->fc_table[sbp->iotag])) { 3612 3613 /* Create the abort IOCB */ 3614 if (hba->state >= FC_LINK_UP) { 3615 iocbq = 3616 emlxs_create_abort_xri_cn(port, sbp->node, 3617 sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS); 3618 3619 mutex_enter(&sbp->mtx); 3620 sbp->pkt_flags |= PACKET_XRI_CLOSED; 3621 sbp->ticks = 3622 hba->timer_tics + (4 * hba->fc_ratov) + 10; 3623 sbp->abort_attempts++; 3624 mutex_exit(&sbp->mtx); 3625 } else { 3626 iocbq = 3627 emlxs_create_close_xri_cn(port, sbp->node, 3628 sbp->iotag, rp); 3629 3630 mutex_enter(&sbp->mtx); 3631 sbp->pkt_flags |= PACKET_XRI_CLOSED; 3632 sbp->ticks = hba->timer_tics + 30; 3633 sbp->abort_attempts++; 3634 mutex_exit(&sbp->mtx); 3635 } 3636 3637 mutex_exit(&EMLXS_FCTAB_LOCK(ringno)); 3638 3639 /* Send this iocbq */ 3640 if (iocbq) { 3641 emlxs_sli_issue_iocb_cmd(hba, rp, iocbq); 3642 iocbq = NULL; 3643 } 3644 3645 goto done; 3646 } 3647 3648 mutex_exit(&EMLXS_FCTAB_LOCK(ringno)); 3649 3650 /* Pkt was not on any queues */ 3651 3652 /* Check again if we still own this */ 3653 if (!(sbp->pkt_flags & PACKET_VALID) || 3654 (sbp->pkt_flags & 3655 (PACKET_RETURNED | PACKET_IN_COMPLETION | 3656 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) { 3657 goto done; 3658 } 3659 3660 /* Apparently the pkt was not found. 
Let's delay and try again */ 3661 if (pass < 5) { 3662 delay(drv_usectohz(5000000)); /* 5 seconds */ 3663 3664 /* Check again if we still own this */ 3665 if (!(sbp->pkt_flags & PACKET_VALID) || 3666 (sbp->pkt_flags & 3667 (PACKET_RETURNED | PACKET_IN_COMPLETION | 3668 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) { 3669 goto done; 3670 } 3671 3672 goto begin; 3673 } 3674 3675 force_it: 3676 3677 /* Force the completion now */ 3678 3679 /* Unregister the pkt */ 3680 (void) emlxs_unregister_pkt(rp, sbp->iotag, 1); 3681 3682 /* Now complete it */ 3683 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED, 3684 1); 3685 3686 done: 3687 3688 /* Now wait for the pkt to complete */ 3689 if (!(sbp->pkt_flags & PACKET_COMPLETED)) { 3690 /* Set thread timeout */ 3691 timeout = emlxs_timeout(hba, 30); 3692 3693 /* Check for panic situation */ 3694 if (ddi_in_panic()) { 3695 3696 /* 3697 * In panic situations there will be one thread with no 3698 * interrrupts (hard or soft) and no timers 3699 */ 3700 3701 /* 3702 * We must manually poll everything in this thread 3703 * to keep the driver going. 
3704 */ 3705 3706 rp = (emlxs_ring_t *)sbp->ring; 3707 switch (rp->ringno) { 3708 case FC_FCP_RING: 3709 att_bit = HA_R0ATT; 3710 break; 3711 3712 case FC_IP_RING: 3713 att_bit = HA_R1ATT; 3714 break; 3715 3716 case FC_ELS_RING: 3717 att_bit = HA_R2ATT; 3718 break; 3719 3720 case FC_CT_RING: 3721 att_bit = HA_R3ATT; 3722 break; 3723 } 3724 3725 /* Keep polling the chip until our IO is completed */ 3726 (void) drv_getparm(LBOLT, &time); 3727 while ((time < timeout) && 3728 !(sbp->pkt_flags & PACKET_COMPLETED)) { 3729 emlxs_sli_poll_intr(hba, att_bit); 3730 (void) drv_getparm(LBOLT, &time); 3731 } 3732 } else { 3733 /* Wait for IO completion or timeout */ 3734 mutex_enter(&EMLXS_PKT_LOCK); 3735 pkt_ret = 0; 3736 while ((pkt_ret != -1) && 3737 !(sbp->pkt_flags & PACKET_COMPLETED)) { 3738 pkt_ret = 3739 cv_timedwait(&EMLXS_PKT_CV, 3740 &EMLXS_PKT_LOCK, timeout); 3741 } 3742 mutex_exit(&EMLXS_PKT_LOCK); 3743 } 3744 3745 /* Check if timeout occured. This is not good. */ 3746 /* Something happened to our IO. 
*/ 3747 if (!(sbp->pkt_flags & PACKET_COMPLETED)) { 3748 /* Force the completion now */ 3749 goto force_it; 3750 } 3751 } 3752 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 3753 emlxs_unswap_pkt(sbp); 3754 #endif /* EMLXS_MODREV2X */ 3755 3756 /* Check again if we still own this */ 3757 if ((sbp->pkt_flags & PACKET_VALID) && 3758 !(sbp->pkt_flags & PACKET_RETURNED)) { 3759 mutex_enter(&sbp->mtx); 3760 if ((sbp->pkt_flags & PACKET_VALID) && 3761 !(sbp->pkt_flags & PACKET_RETURNED)) { 3762 sbp->pkt_flags |= PACKET_RETURNED; 3763 } 3764 mutex_exit(&sbp->mtx); 3765 } 3766 #ifdef ULP_PATCH5 3767 return (FC_FAILURE); 3768 3769 #else 3770 return (FC_SUCCESS); 3771 3772 #endif /* ULP_PATCH5 */ 3773 3774 3775 } /* emlxs_pkt_abort() */ 3776 3777 3778 extern int32_t 3779 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd) 3780 { 3781 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3782 emlxs_hba_t *hba = HBA; 3783 int rval; 3784 int ret; 3785 clock_t timeout; 3786 3787 if (!(port->flag & EMLXS_PORT_BOUND)) { 3788 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3789 "fca_reset failed. 
Port not bound."); 3790 3791 return (FC_UNBOUND); 3792 } 3793 3794 switch (cmd) { 3795 case FC_FCA_LINK_RESET: 3796 3797 if (!(hba->flag & FC_ONLINE_MODE) || 3798 (hba->state <= FC_LINK_DOWN)) { 3799 return (FC_SUCCESS); 3800 } 3801 3802 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3803 "fca_reset: Resetting Link."); 3804 3805 mutex_enter(&EMLXS_LINKUP_LOCK); 3806 hba->linkup_wait_flag = TRUE; 3807 mutex_exit(&EMLXS_LINKUP_LOCK); 3808 3809 if (emlxs_reset_link(hba, 1)) { 3810 mutex_enter(&EMLXS_LINKUP_LOCK); 3811 hba->linkup_wait_flag = FALSE; 3812 mutex_exit(&EMLXS_LINKUP_LOCK); 3813 3814 return (FC_FAILURE); 3815 } 3816 3817 mutex_enter(&EMLXS_LINKUP_LOCK); 3818 timeout = emlxs_timeout(hba, 60); 3819 ret = 0; 3820 while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) { 3821 ret = 3822 cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK, 3823 timeout); 3824 } 3825 3826 hba->linkup_wait_flag = FALSE; 3827 mutex_exit(&EMLXS_LINKUP_LOCK); 3828 3829 if (ret == -1) { 3830 return (FC_FAILURE); 3831 } 3832 3833 return (FC_SUCCESS); 3834 3835 case FC_FCA_CORE: 3836 #ifdef DUMP_SUPPORT 3837 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3838 "fca_reset: Core dump."); 3839 3840 /* Schedule a USER dump */ 3841 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0); 3842 3843 /* Wait for dump to complete */ 3844 emlxs_dump_wait(hba); 3845 3846 return (FC_SUCCESS); 3847 #endif /* DUMP_SUPPORT */ 3848 3849 case FC_FCA_RESET: 3850 case FC_FCA_RESET_CORE: 3851 3852 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3853 "fca_reset: Resetting Adapter."); 3854 3855 rval = FC_SUCCESS; 3856 3857 if (emlxs_offline(hba) == 0) { 3858 (void) emlxs_online(hba); 3859 } else { 3860 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3861 "fca_reset: Adapter reset failed. Device busy."); 3862 3863 rval = FC_DEVICE_BUSY; 3864 } 3865 3866 return (rval); 3867 3868 default: 3869 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3870 "fca_reset: Unknown command. 
cmd=%x", cmd); 3871 3872 break; 3873 } 3874 3875 return (FC_FAILURE); 3876 3877 } /* emlxs_reset() */ 3878 3879 3880 extern uint32_t emlxs_core_dump(emlxs_hba_t *hba, char *buffer, 3881 uint32_t size); 3882 extern uint32_t emlxs_core_size(emlxs_hba_t *hba); 3883 3884 extern int 3885 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm) 3886 { 3887 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3888 emlxs_hba_t *hba = HBA; 3889 int32_t ret; 3890 emlxs_vpd_t *vpd = &VPD; 3891 3892 3893 ret = FC_SUCCESS; 3894 3895 if (!(port->flag & EMLXS_PORT_BOUND)) { 3896 return (FC_UNBOUND); 3897 } 3898 3899 3900 #ifdef IDLE_TIMER 3901 emlxs_pm_busy_component(hba); 3902 #endif /* IDLE_TIMER */ 3903 3904 switch (pm->pm_cmd_code) { 3905 3906 case FC_PORT_GET_FW_REV: 3907 { 3908 char buffer[128]; 3909 3910 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3911 "fca_port_manage: FC_PORT_GET_FW_REV"); 3912 3913 (void) sprintf(buffer, "%s %s", hba->model_info.model, 3914 vpd->fw_version); 3915 bzero(pm->pm_data_buf, pm->pm_data_len); 3916 3917 if (pm->pm_data_len < strlen(buffer) + 1) { 3918 ret = FC_NOMEM; 3919 3920 break; 3921 } 3922 3923 (void) strcpy(pm->pm_data_buf, buffer); 3924 break; 3925 } 3926 3927 case FC_PORT_GET_FCODE_REV: 3928 { 3929 char buffer[128]; 3930 3931 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3932 "fca_port_manage: FC_PORT_GET_FCODE_REV"); 3933 3934 /* Force update here just to be sure */ 3935 emlxs_get_fcode_version(hba); 3936 3937 (void) sprintf(buffer, "%s %s", hba->model_info.model, 3938 vpd->fcode_version); 3939 bzero(pm->pm_data_buf, pm->pm_data_len); 3940 3941 if (pm->pm_data_len < strlen(buffer) + 1) { 3942 ret = FC_NOMEM; 3943 break; 3944 } 3945 3946 (void) strcpy(pm->pm_data_buf, buffer); 3947 break; 3948 } 3949 3950 case FC_PORT_GET_DUMP_SIZE: 3951 { 3952 #ifdef DUMP_SUPPORT 3953 uint32_t dump_size = 0; 3954 3955 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3956 "fca_port_manage: FC_PORT_GET_DUMP_SIZE"); 3957 3958 if 
(pm->pm_data_len < sizeof (uint32_t)) { 3959 ret = FC_NOMEM; 3960 break; 3961 } 3962 3963 (void) emlxs_get_dump(hba, NULL, &dump_size); 3964 3965 *((uint32_t *)pm->pm_data_buf) = dump_size; 3966 3967 #else 3968 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3969 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported."); 3970 3971 #endif /* DUMP_SUPPORT */ 3972 3973 break; 3974 } 3975 3976 case FC_PORT_GET_DUMP: 3977 { 3978 #ifdef DUMP_SUPPORT 3979 uint32_t dump_size = 0; 3980 3981 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3982 "fca_port_manage: FC_PORT_GET_DUMP"); 3983 3984 (void) emlxs_get_dump(hba, NULL, &dump_size); 3985 3986 if (pm->pm_data_len < dump_size) { 3987 ret = FC_NOMEM; 3988 break; 3989 } 3990 3991 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf, 3992 (uint32_t *)&dump_size); 3993 #else 3994 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3995 "fca_port_manage: FC_PORT_GET_DUMP unsupported."); 3996 3997 #endif /* DUMP_SUPPORT */ 3998 3999 break; 4000 } 4001 4002 case FC_PORT_FORCE_DUMP: 4003 { 4004 #ifdef DUMP_SUPPORT 4005 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4006 "fca_port_manage: FC_PORT_FORCE_DUMP"); 4007 4008 /* Schedule a USER dump */ 4009 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0); 4010 4011 /* Wait for dump to complete */ 4012 emlxs_dump_wait(hba); 4013 #else 4014 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4015 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported."); 4016 4017 #endif /* DUMP_SUPPORT */ 4018 break; 4019 } 4020 4021 case FC_PORT_LINK_STATE: 4022 { 4023 uint32_t *link_state; 4024 4025 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4026 "fca_port_manage: FC_PORT_LINK_STATE"); 4027 4028 if (pm->pm_stat_len != sizeof (*link_state)) { 4029 ret = FC_NOMEM; 4030 break; 4031 } 4032 4033 if (pm->pm_cmd_buf != NULL) { 4034 /* 4035 * Can't look beyond the FCA port. 
4036 */ 4037 ret = FC_INVALID_REQUEST; 4038 break; 4039 } 4040 4041 link_state = (uint32_t *)pm->pm_stat_buf; 4042 4043 /* Set the state */ 4044 if (hba->state >= FC_LINK_UP) { 4045 /* Check for loop topology */ 4046 if (hba->topology == TOPOLOGY_LOOP) { 4047 *link_state = FC_STATE_LOOP; 4048 } else { 4049 *link_state = FC_STATE_ONLINE; 4050 } 4051 4052 /* Set the link speed */ 4053 switch (hba->linkspeed) { 4054 case LA_2GHZ_LINK: 4055 *link_state |= FC_STATE_2GBIT_SPEED; 4056 break; 4057 case LA_4GHZ_LINK: 4058 *link_state |= FC_STATE_4GBIT_SPEED; 4059 break; 4060 case LA_8GHZ_LINK: 4061 *link_state |= FC_STATE_8GBIT_SPEED; 4062 break; 4063 case LA_10GHZ_LINK: 4064 *link_state |= FC_STATE_10GBIT_SPEED; 4065 break; 4066 case LA_1GHZ_LINK: 4067 default: 4068 *link_state |= FC_STATE_1GBIT_SPEED; 4069 break; 4070 } 4071 } else { 4072 *link_state = FC_STATE_OFFLINE; 4073 } 4074 4075 break; 4076 } 4077 4078 4079 case FC_PORT_ERR_STATS: 4080 case FC_PORT_RLS: 4081 { 4082 MAILBOX *mb; 4083 fc_rls_acc_t *bp; 4084 4085 if (!(hba->flag & FC_ONLINE_MODE)) { 4086 return (FC_OFFLINE); 4087 } 4088 4089 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4090 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS"); 4091 4092 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) { 4093 ret = FC_NOMEM; 4094 break; 4095 } 4096 4097 if ((mb = (MAILBOX *)emlxs_mem_get(hba, 4098 MEM_MBOX | MEM_PRI)) == 0) { 4099 ret = FC_NOMEM; 4100 break; 4101 } 4102 4103 emlxs_mb_read_lnk_stat(hba, mb); 4104 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) 4105 != MBX_SUCCESS) { 4106 ret = FC_PBUSY; 4107 } else { 4108 bp = (fc_rls_acc_t *)pm->pm_data_buf; 4109 4110 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt; 4111 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt; 4112 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt; 4113 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt; 4114 bp->rls_invalid_word = 4115 mb->un.varRdLnk.invalidXmitWord; 4116 bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt; 4117 } 4118 4119 
(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb); 4120 break; 4121 } 4122 4123 case FC_PORT_DOWNLOAD_FW: 4124 if (!(hba->flag & FC_ONLINE_MODE)) { 4125 return (FC_OFFLINE); 4126 } 4127 4128 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4129 "fca_port_manage: FC_PORT_DOWNLOAD_FW"); 4130 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4131 pm->pm_data_len, 1); 4132 break; 4133 4134 case FC_PORT_DOWNLOAD_FCODE: 4135 if (!(hba->flag & FC_ONLINE_MODE)) { 4136 return (FC_OFFLINE); 4137 } 4138 4139 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4140 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE"); 4141 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4142 pm->pm_data_len, 1); 4143 break; 4144 4145 case FC_PORT_DIAG: 4146 { 4147 uint32_t errno = 0; 4148 uint32_t did = 0; 4149 uint32_t pattern = 0; 4150 4151 switch (pm->pm_cmd_flags) { 4152 case EMLXS_DIAG_BIU: 4153 4154 if (!(hba->flag & FC_ONLINE_MODE)) { 4155 return (FC_OFFLINE); 4156 } 4157 4158 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4159 "fca_port_manage: EMLXS_DIAG_BIU"); 4160 4161 if (pm->pm_data_len) { 4162 pattern = *((uint32_t *)pm->pm_data_buf); 4163 } 4164 4165 errno = emlxs_diag_biu_run(hba, pattern); 4166 4167 if (pm->pm_stat_len == sizeof (errno)) { 4168 *(int *)pm->pm_stat_buf = errno; 4169 } 4170 4171 break; 4172 4173 4174 case EMLXS_DIAG_POST: 4175 4176 if (!(hba->flag & FC_ONLINE_MODE)) { 4177 return (FC_OFFLINE); 4178 } 4179 4180 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4181 "fca_port_manage: EMLXS_DIAG_POST"); 4182 4183 errno = emlxs_diag_post_run(hba); 4184 4185 if (pm->pm_stat_len == sizeof (errno)) { 4186 *(int *)pm->pm_stat_buf = errno; 4187 } 4188 4189 break; 4190 4191 4192 case EMLXS_DIAG_ECHO: 4193 4194 if (!(hba->flag & FC_ONLINE_MODE)) { 4195 return (FC_OFFLINE); 4196 } 4197 4198 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4199 "fca_port_manage: EMLXS_DIAG_ECHO"); 4200 4201 if (pm->pm_cmd_len != sizeof (uint32_t)) { 4202 ret = FC_INVALID_REQUEST; 4203 break; 4204 } 4205 4206 did 
= *((uint32_t *)pm->pm_cmd_buf); 4207 4208 if (pm->pm_data_len) { 4209 pattern = *((uint32_t *)pm->pm_data_buf); 4210 } 4211 4212 errno = emlxs_diag_echo_run(port, did, pattern); 4213 4214 if (pm->pm_stat_len == sizeof (errno)) { 4215 *(int *)pm->pm_stat_buf = errno; 4216 } 4217 4218 break; 4219 4220 4221 case EMLXS_PARM_GET_NUM: 4222 { 4223 uint32_t *num; 4224 emlxs_config_t *cfg; 4225 uint32_t i; 4226 uint32_t count; 4227 4228 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4229 "fca_port_manage: EMLXS_PARM_GET_NUM"); 4230 4231 if (pm->pm_stat_len < sizeof (uint32_t)) { 4232 ret = FC_NOMEM; 4233 break; 4234 } 4235 4236 num = (uint32_t *)pm->pm_stat_buf; 4237 count = 0; 4238 cfg = &CFG; 4239 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4240 if (!(cfg->flags & PARM_HIDDEN)) { 4241 count++; 4242 } 4243 4244 } 4245 4246 *num = count; 4247 4248 break; 4249 } 4250 4251 case EMLXS_PARM_GET_LIST: 4252 { 4253 emlxs_parm_t *parm; 4254 emlxs_config_t *cfg; 4255 uint32_t i; 4256 uint32_t max_count; 4257 4258 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4259 "fca_port_manage: EMLXS_PARM_GET_LIST"); 4260 4261 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4262 ret = FC_NOMEM; 4263 break; 4264 } 4265 4266 max_count = pm->pm_stat_len / sizeof (emlxs_parm_t); 4267 4268 parm = (emlxs_parm_t *)pm->pm_stat_buf; 4269 cfg = &CFG; 4270 for (i = 0; i < NUM_CFG_PARAM && max_count; i++, 4271 cfg++) { 4272 if (!(cfg->flags & PARM_HIDDEN)) { 4273 (void) strcpy(parm->label, cfg->string); 4274 parm->min = cfg->low; 4275 parm->max = cfg->hi; 4276 parm->def = cfg->def; 4277 parm->current = cfg->current; 4278 parm->flags = cfg->flags; 4279 (void) strcpy(parm->help, cfg->help); 4280 parm++; 4281 max_count--; 4282 } 4283 } 4284 4285 break; 4286 } 4287 4288 case EMLXS_PARM_GET: 4289 { 4290 emlxs_parm_t *parm_in; 4291 emlxs_parm_t *parm_out; 4292 emlxs_config_t *cfg; 4293 uint32_t i; 4294 uint32_t len; 4295 4296 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4297 EMLXS_MSGF(EMLXS_CONTEXT, 4298 
&emlxs_sfs_debug_msg, 4299 "fca_port_manage: EMLXS_PARM_GET. " 4300 "inbuf too small."); 4301 4302 ret = FC_BADCMD; 4303 break; 4304 } 4305 4306 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4307 EMLXS_MSGF(EMLXS_CONTEXT, 4308 &emlxs_sfs_debug_msg, 4309 "fca_port_manage: EMLXS_PARM_GET. " 4310 "outbuf too small"); 4311 4312 ret = FC_BADCMD; 4313 break; 4314 } 4315 4316 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4317 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4318 len = strlen(parm_in->label); 4319 cfg = &CFG; 4320 ret = FC_BADOBJECT; 4321 4322 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4323 "fca_port_manage: EMLXS_PARM_GET: %s", 4324 parm_in->label); 4325 4326 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4327 if (len == strlen(cfg->string) && 4328 (strcmp(parm_in->label, 4329 cfg->string) == 0)) { 4330 (void) strcpy(parm_out->label, 4331 cfg->string); 4332 parm_out->min = cfg->low; 4333 parm_out->max = cfg->hi; 4334 parm_out->def = cfg->def; 4335 parm_out->current = cfg->current; 4336 parm_out->flags = cfg->flags; 4337 (void) strcpy(parm_out->help, 4338 cfg->help); 4339 4340 ret = FC_SUCCESS; 4341 break; 4342 } 4343 } 4344 4345 break; 4346 } 4347 4348 case EMLXS_PARM_SET: 4349 { 4350 emlxs_parm_t *parm_in; 4351 emlxs_parm_t *parm_out; 4352 emlxs_config_t *cfg; 4353 uint32_t i; 4354 uint32_t len; 4355 4356 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4357 EMLXS_MSGF(EMLXS_CONTEXT, 4358 &emlxs_sfs_debug_msg, 4359 "fca_port_manage: EMLXS_PARM_GET. " 4360 "inbuf too small."); 4361 4362 ret = FC_BADCMD; 4363 break; 4364 } 4365 4366 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4367 EMLXS_MSGF(EMLXS_CONTEXT, 4368 &emlxs_sfs_debug_msg, 4369 "fca_port_manage: EMLXS_PARM_GET. 
" 4370 "outbuf too small"); 4371 ret = FC_BADCMD; 4372 break; 4373 } 4374 4375 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4376 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4377 len = strlen(parm_in->label); 4378 cfg = &CFG; 4379 ret = FC_BADOBJECT; 4380 4381 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4382 "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d", 4383 parm_in->label, parm_in->current, 4384 parm_in->current); 4385 4386 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4387 /* Find matching parameter string */ 4388 if (len == strlen(cfg->string) && 4389 (strcmp(parm_in->label, 4390 cfg->string) == 0)) { 4391 /* Attempt to update parameter */ 4392 if (emlxs_set_parm(hba, i, 4393 parm_in->current) == FC_SUCCESS) { 4394 (void) strcpy(parm_out->label, 4395 cfg->string); 4396 parm_out->min = cfg->low; 4397 parm_out->max = cfg->hi; 4398 parm_out->def = cfg->def; 4399 parm_out->current = 4400 cfg->current; 4401 parm_out->flags = cfg->flags; 4402 (void) strcpy(parm_out->help, 4403 cfg->help); 4404 4405 ret = FC_SUCCESS; 4406 } 4407 4408 break; 4409 } 4410 } 4411 4412 break; 4413 } 4414 4415 case EMLXS_LOG_GET: 4416 { 4417 emlxs_log_req_t *req; 4418 emlxs_log_resp_t *resp; 4419 uint32_t len; 4420 4421 /* Check command size */ 4422 if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) { 4423 ret = FC_BADCMD; 4424 break; 4425 } 4426 4427 /* Get the request */ 4428 req = (emlxs_log_req_t *)pm->pm_cmd_buf; 4429 4430 /* Calculate the response length from the request */ 4431 len = sizeof (emlxs_log_resp_t) + 4432 (req->count * MAX_LOG_MSG_LENGTH); 4433 4434 /* Check the response buffer length */ 4435 if (pm->pm_stat_len < len) { 4436 ret = FC_BADCMD; 4437 break; 4438 } 4439 4440 /* Get the response pointer */ 4441 resp = (emlxs_log_resp_t *)pm->pm_stat_buf; 4442 4443 /* Get the request log enties */ 4444 (void) emlxs_msg_log_get(hba, req, resp); 4445 4446 ret = FC_SUCCESS; 4447 break; 4448 } 4449 4450 case EMLXS_GET_BOOT_REV: 4451 { 4452 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 
4453 "fca_port_manage: EMLXS_GET_BOOT_REV"); 4454 4455 if (pm->pm_stat_len < strlen(vpd->boot_version)) { 4456 ret = FC_NOMEM; 4457 break; 4458 } 4459 4460 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4461 (void) sprintf(pm->pm_stat_buf, "%s %s", 4462 hba->model_info.model, vpd->boot_version); 4463 4464 break; 4465 } 4466 4467 case EMLXS_DOWNLOAD_BOOT: 4468 if (!(hba->flag & FC_ONLINE_MODE)) { 4469 return (FC_OFFLINE); 4470 } 4471 4472 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4473 "fca_port_manage: EMLXS_DOWNLOAD_BOOT"); 4474 4475 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4476 pm->pm_data_len, 1); 4477 break; 4478 4479 case EMLXS_DOWNLOAD_CFL: 4480 { 4481 uint32_t *buffer; 4482 uint32_t region; 4483 uint32_t length; 4484 4485 if (!(hba->flag & FC_ONLINE_MODE)) { 4486 return (FC_OFFLINE); 4487 } 4488 4489 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4490 "fca_port_manage: EMLXS_DOWNLOAD_CFL"); 4491 4492 /* Extract the region number from the first word. */ 4493 buffer = (uint32_t *)pm->pm_data_buf; 4494 region = *buffer++; 4495 4496 /* Adjust the image length for the header word */ 4497 length = pm->pm_data_len - 4; 4498 4499 ret = 4500 emlxs_cfl_download(hba, region, (caddr_t)buffer, 4501 length); 4502 break; 4503 } 4504 4505 case EMLXS_VPD_GET: 4506 { 4507 emlxs_vpd_desc_t *vpd_out; 4508 4509 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4510 "fca_port_manage: EMLXS_VPD_GET"); 4511 4512 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) { 4513 ret = FC_BADCMD; 4514 break; 4515 } 4516 4517 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf; 4518 bzero(vpd_out, sizeof (emlxs_vpd_desc_t)); 4519 4520 (void) strncpy(vpd_out->id, vpd->id, 4521 sizeof (vpd_out->id)); 4522 (void) strncpy(vpd_out->part_num, vpd->part_num, 4523 sizeof (vpd_out->part_num)); 4524 (void) strncpy(vpd_out->eng_change, vpd->eng_change, 4525 sizeof (vpd_out->eng_change)); 4526 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer, 4527 sizeof (vpd_out->manufacturer)); 4528 (void) 
strncpy(vpd_out->serial_num, vpd->serial_num, 4529 sizeof (vpd_out->serial_num)); 4530 (void) strncpy(vpd_out->model, vpd->model, 4531 sizeof (vpd_out->model)); 4532 (void) strncpy(vpd_out->model_desc, vpd->model_desc, 4533 sizeof (vpd_out->model_desc)); 4534 (void) strncpy(vpd_out->port_num, vpd->port_num, 4535 sizeof (vpd_out->port_num)); 4536 (void) strncpy(vpd_out->prog_types, vpd->prog_types, 4537 sizeof (vpd_out->prog_types)); 4538 4539 ret = FC_SUCCESS; 4540 4541 break; 4542 } 4543 4544 case EMLXS_GET_FCIO_REV: 4545 { 4546 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4547 "fca_port_manage: EMLXS_GET_FCIO_REV"); 4548 4549 if (pm->pm_stat_len < sizeof (uint32_t)) { 4550 ret = FC_NOMEM; 4551 break; 4552 } 4553 4554 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4555 *(uint32_t *)pm->pm_stat_buf = FCIO_REV; 4556 4557 break; 4558 } 4559 4560 case EMLXS_GET_DFC_REV: 4561 { 4562 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4563 "fca_port_manage: EMLXS_GET_DFC_REV"); 4564 4565 if (pm->pm_stat_len < sizeof (uint32_t)) { 4566 ret = FC_NOMEM; 4567 break; 4568 } 4569 4570 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4571 *(uint32_t *)pm->pm_stat_buf = DFC_REV; 4572 4573 break; 4574 } 4575 4576 case EMLXS_SET_BOOT_STATE: 4577 case EMLXS_SET_BOOT_STATE_old: 4578 { 4579 uint32_t state; 4580 4581 if (!(hba->flag & FC_ONLINE_MODE)) { 4582 return (FC_OFFLINE); 4583 } 4584 4585 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4586 EMLXS_MSGF(EMLXS_CONTEXT, 4587 &emlxs_sfs_debug_msg, 4588 "fca_port_manage: EMLXS_SET_BOOT_STATE"); 4589 ret = FC_BADCMD; 4590 break; 4591 } 4592 4593 state = *(uint32_t *)pm->pm_cmd_buf; 4594 4595 if (state == 0) { 4596 EMLXS_MSGF(EMLXS_CONTEXT, 4597 &emlxs_sfs_debug_msg, 4598 "fca_port_manage: EMLXS_SET_BOOT_STATE: " 4599 "Disable"); 4600 ret = emlxs_boot_code_disable(hba); 4601 } else { 4602 EMLXS_MSGF(EMLXS_CONTEXT, 4603 &emlxs_sfs_debug_msg, 4604 "fca_port_manage: EMLXS_SET_BOOT_STATE: " 4605 "Enable"); 4606 ret = emlxs_boot_code_enable(hba); 4607 } 4608 
4609 break; 4610 } 4611 4612 case EMLXS_GET_BOOT_STATE: 4613 case EMLXS_GET_BOOT_STATE_old: 4614 { 4615 if (!(hba->flag & FC_ONLINE_MODE)) { 4616 return (FC_OFFLINE); 4617 } 4618 4619 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4620 "fca_port_manage: EMLXS_GET_BOOT_STATE"); 4621 4622 if (pm->pm_stat_len < sizeof (uint32_t)) { 4623 ret = FC_NOMEM; 4624 break; 4625 } 4626 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4627 4628 ret = emlxs_boot_code_state(hba); 4629 4630 if (ret == FC_SUCCESS) { 4631 *(uint32_t *)pm->pm_stat_buf = 1; 4632 ret = FC_SUCCESS; 4633 } else if (ret == FC_FAILURE) { 4634 ret = FC_SUCCESS; 4635 } 4636 4637 break; 4638 } 4639 4640 case EMLXS_HW_ERROR_TEST: 4641 { 4642 if (!(hba->flag & FC_ONLINE_MODE)) { 4643 return (FC_OFFLINE); 4644 } 4645 4646 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4647 "fca_port_manage: EMLXS_HW_ERROR_TEST"); 4648 4649 /* Trigger a mailbox timeout */ 4650 hba->mbox_timer = hba->timer_tics; 4651 4652 break; 4653 } 4654 4655 case EMLXS_TEST_CODE: 4656 { 4657 uint32_t *cmd; 4658 4659 if (!(hba->flag & FC_ONLINE_MODE)) { 4660 return (FC_OFFLINE); 4661 } 4662 4663 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4664 "fca_port_manage: EMLXS_TEST_CODE"); 4665 4666 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4667 EMLXS_MSGF(EMLXS_CONTEXT, 4668 &emlxs_sfs_debug_msg, 4669 "fca_port_manage: EMLXS_TEST_CODE. 
" 4670 "inbuf to small."); 4671 4672 ret = FC_BADCMD; 4673 break; 4674 } 4675 4676 cmd = (uint32_t *)pm->pm_cmd_buf; 4677 4678 ret = emlxs_test(hba, cmd[0], 4679 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]); 4680 4681 break; 4682 } 4683 4684 default: 4685 4686 ret = FC_INVALID_REQUEST; 4687 break; 4688 } 4689 4690 break; 4691 4692 } 4693 4694 case FC_PORT_INITIALIZE: 4695 if (!(hba->flag & FC_ONLINE_MODE)) { 4696 return (FC_OFFLINE); 4697 } 4698 4699 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4700 "fca_port_manage: FC_PORT_INITIALIZE"); 4701 break; 4702 4703 case FC_PORT_LOOPBACK: 4704 if (!(hba->flag & FC_ONLINE_MODE)) { 4705 return (FC_OFFLINE); 4706 } 4707 4708 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4709 "fca_port_manage: FC_PORT_LOOPBACK"); 4710 break; 4711 4712 case FC_PORT_BYPASS: 4713 if (!(hba->flag & FC_ONLINE_MODE)) { 4714 return (FC_OFFLINE); 4715 } 4716 4717 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4718 "fca_port_manage: FC_PORT_BYPASS"); 4719 ret = FC_INVALID_REQUEST; 4720 break; 4721 4722 case FC_PORT_UNBYPASS: 4723 if (!(hba->flag & FC_ONLINE_MODE)) { 4724 return (FC_OFFLINE); 4725 } 4726 4727 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4728 "fca_port_manage: FC_PORT_UNBYPASS"); 4729 ret = FC_INVALID_REQUEST; 4730 break; 4731 4732 case FC_PORT_GET_NODE_ID: 4733 { 4734 fc_rnid_t *rnid; 4735 4736 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4737 "fca_port_manage: FC_PORT_GET_NODE_ID"); 4738 4739 bzero(pm->pm_data_buf, pm->pm_data_len); 4740 4741 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 4742 ret = FC_NOMEM; 4743 break; 4744 } 4745 4746 rnid = (fc_rnid_t *)pm->pm_data_buf; 4747 4748 (void) sprintf((char *)rnid->global_id, 4749 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", 4750 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, 4751 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 4752 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 4753 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 4754 4755 rnid->unit_type = RNID_HBA; 4756 rnid->port_id = 
port->did; 4757 rnid->ip_version = RNID_IPV4; 4758 4759 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4760 "GET_NODE_ID: wwpn: %s", rnid->global_id); 4761 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4762 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 4763 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4764 "GET_NODE_ID: port_id: 0x%x", rnid->port_id); 4765 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4766 "GET_NODE_ID: num_attach: %d", rnid->num_attached); 4767 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4768 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 4769 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4770 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 4771 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4772 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr); 4773 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4774 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv); 4775 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4776 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 4777 4778 ret = FC_SUCCESS; 4779 break; 4780 } 4781 4782 case FC_PORT_SET_NODE_ID: 4783 { 4784 fc_rnid_t *rnid; 4785 4786 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4787 "fca_port_manage: FC_PORT_SET_NODE_ID"); 4788 4789 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 4790 ret = FC_NOMEM; 4791 break; 4792 } 4793 4794 rnid = (fc_rnid_t *)pm->pm_data_buf; 4795 4796 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4797 "SET_NODE_ID: wwpn: %s", rnid->global_id); 4798 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4799 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 4800 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4801 "SET_NODE_ID: port_id: 0x%x", rnid->port_id); 4802 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4803 "SET_NODE_ID: num_attach: %d", rnid->num_attached); 4804 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4805 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 4806 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4807 "SET_NODE_ID: udp_port: 0x%x", 
		    rnid->udp_port);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: ip_addr: %s", rnid->ip_addr);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);

		ret = FC_SUCCESS;
		break;
	}

	default:
		/* Unrecognized pm_cmd_code: reject the request */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: code=%x", pm->pm_cmd_code);
		ret = FC_INVALID_REQUEST;
		break;

	}

	return (ret);

} /* emlxs_port_manage() */


/*
 * emlxs_test
 *
 * Dispatch an internal diagnostic test selected by test_code.
 * The (args, arg) pair carries an optional argument vector: 'args'
 * is the number of uint32_t values available in 'arg'.
 *
 * Returns 0 on success or FC_INVALID_REQUEST for an unknown test code.
 * Only reachable test today is the SCSI-underrun injector (TEST_SUPPORT).
 */
/*ARGSUSED*/
static uint32_t
emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
    uint32_t *arg)
{
	uint32_t rval = 0;
	emlxs_port_t *port = &PPORT;

	switch (test_code) {
#ifdef TEST_SUPPORT
	case 1: /* SCSI underrun */
	{
		/* Use first argument as the underrun count, default to 1 */
		hba->underrun_counter = (args)? arg[0]:1;
		break;
	}
#endif /* TEST_SUPPORT */

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
		rval = FC_INVALID_REQUEST;
	}

	return (rval);

} /* emlxs_test() */


/*
 * Given the device number, return the devinfo pointer or the ddiinst number.
 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
 * before attach.
 *
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */
/*ARGSUSED*/
static int
emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	emlxs_hba_t *hba;
	int32_t ddiinst;

	/* The minor number of the dev_t encodes the driver instance */
	ddiinst = getminor((dev_t)arg);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		/* Soft state may not exist yet; report NULL in that case */
		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
		if (hba)
			*result = hba->dip;
		else
			*result = NULL;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		/* Must succeed even before attach (no soft state lookup) */
		*result = (void *)((unsigned long)ddiinst);
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

} /* emlxs_info() */


/*
 * emlxs_power
 *
 * power(9E) entry point. Raises or lowers the power level of the
 * single adapter component (EMLXS_PM_ADAPTER) by resuming or
 * suspending the HBA. hba->pm_level is tracked under hba->pm_lock.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int32_t
emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int32_t ddiinst;
	int rval = DDI_SUCCESS;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_power: comp=%x level=%x", comp, level);

	/* Only the adapter component is power-managed */
	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
		return (DDI_FAILURE);
	}

	mutex_enter(&hba->pm_lock);

	/* If we are already at the proper level then return success */
	if (hba->pm_level == level) {
		mutex_exit(&hba->pm_lock);
		return (DDI_SUCCESS);
	}

	switch (level) {
	case EMLXS_PM_ADAPTER_UP:

		/*
		 * If we are already in emlxs_attach,
		 * let emlxs_hba_attach take care of things
		 */
		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
			hba->pm_level = EMLXS_PM_ADAPTER_UP;
			break;
		}

		/* Check if adapter is suspended */
		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
			hba->pm_level = EMLXS_PM_ADAPTER_UP;

			/* Try to resume the port */
			rval = emlxs_hba_resume(dip);

			/* On failure, fall back to the DOWN level */
			if (rval != DDI_SUCCESS) {
				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
			}
			break;
		}

		/* Set adapter up */
		hba->pm_level = EMLXS_PM_ADAPTER_UP;
		break;

	case EMLXS_PM_ADAPTER_DOWN:


		/*
		 * If we are already in emlxs_detach,
		 * let emlxs_hba_detach take care of things
		 */
		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
			break;
		}

		/* Check if adapter is not suspended */
		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;

			/* Try to suspend the port */
			rval = emlxs_hba_suspend(dip);

			/* On failure, restore the UP level */
			if (rval != DDI_SUCCESS) {
				hba->pm_level = EMLXS_PM_ADAPTER_UP;
			}

			break;
		}

		/* Set adapter down */
		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
		break;

	default:
		rval = DDI_FAILURE;
		break;

	}

	mutex_exit(&hba->pm_lock);

	return (rval);

} /* emlxs_power() */


#ifdef EMLXS_I386
#ifdef S11
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-thread at high PIL
 * with preemption disabled. Therefore, this function must not be blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
emlxs_quiesce(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int32_t ddiinst;
	int rval = DDI_SUCCESS;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	/* prom_printf() is safe at high PIL; cmn_err/EMLXS_MSGF are not */
	prom_printf("emlxs%d: emlxs_quiesce...\n ", ddiinst);

	if (hba == NULL || port == NULL) {
		return (DDI_FAILURE);
	}

	/* Reset the chip to silence DMA and interrupts */
	if (emlxs_sli_hba_reset(hba, 0, 0) == 0) {
		return (rval);
	} else {
		return (DDI_FAILURE);
	}

} /* emlxs_quiesce */
#endif
#endif /* EMLXS_I386 */


/*
 * emlxs_open
 *
 * open(9E) entry point for the "devctl" character node. Enforces
 * character-type opens, privilege, and exclusive-open semantics via
 * the EMLXS_OPEN/EMLXS_OPEN_EXCLUSIVE flags under EMLXS_IOCTL_LOCK.
 *
 * Returns 0 on success or an errno value.
 */
static int
emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;

	ddiinst = getminor(*dev_p);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
		return (ENXIO);
	}

	port = &PPORT;

	/* Reject opens while the driver is suspended */
	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
		    "open failed: Driver suspended.");
		return (ENXIO);
	}

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	/* Caller must have sufficient privilege */
	if (drv_priv(cred_p)) {
		return (EPERM);
	}

	mutex_enter(&EMLXS_IOCTL_LOCK);

	/* An existing exclusive open blocks all further opens */
	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
		mutex_exit(&EMLXS_IOCTL_LOCK);
		return (EBUSY);
	}

	if (flag & FEXCL) {
		/* Exclusive open requires that nobody else has it open */
		if (hba->ioctl_flags & EMLXS_OPEN) {
			mutex_exit(&EMLXS_IOCTL_LOCK);
			return (EBUSY);
		}

		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
	}

	hba->ioctl_flags |= EMLXS_OPEN;

	mutex_exit(&EMLXS_IOCTL_LOCK);

	return (0);

} /* emlxs_open() */


/*
 * emlxs_close
 *
 * close(9E) entry point. Clears the open/exclusive-open flags.
 *
 * Returns 0 on success or an errno value.
 */
/*ARGSUSED*/
static int
emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
{
	emlxs_hba_t *hba;
	int ddiinst;

	ddiinst = getminor(dev);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
		return (ENXIO);
	}

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	mutex_enter(&EMLXS_IOCTL_LOCK);

	/* Closing a device that was never opened is an error */
	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
		mutex_exit(&EMLXS_IOCTL_LOCK);
		return (ENODEV);
	}

	hba->ioctl_flags &= ~EMLXS_OPEN;
	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;

	mutex_exit(&EMLXS_IOCTL_LOCK);

	return (0);

} /* emlxs_close() */


/*
 * emlxs_ioctl
 *
 * ioctl(9E) entry point. Validates that the device is open and not
 * suspended, then dispatches the command; only EMLXS_DFC_COMMAND is
 * handled (when DFC_SUPPORT is built in).
 *
 * Returns 0 on success or an errno value.
 */
/*ARGSUSED*/
static int
emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
    cred_t *cred_p, int32_t *rval_p)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int rval = 0;	/* return code */
	int ddiinst;

	ddiinst = getminor(dev);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
		return (ENXIO);
	}

	port = &PPORT;

	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
		    "ioctl failed: Driver suspended.");

		return (ENXIO);
	}

	/* The device must have been opened first */
	mutex_enter(&EMLXS_IOCTL_LOCK);
	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
		mutex_exit(&EMLXS_IOCTL_LOCK);
		return (ENXIO);
	}
	mutex_exit(&EMLXS_IOCTL_LOCK);

#ifdef IDLE_TIMER
	/* Mark the component busy so PM does not power it down mid-ioctl */
	emlxs_pm_busy_component(hba);
#endif /* IDLE_TIMER */

	switch (cmd) {
#ifdef DFC_SUPPORT
	case EMLXS_DFC_COMMAND:
		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
		break;
#endif /* DFC_SUPPORT */

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
		    "ioctl: Invalid command received. cmd=%x", cmd);
		rval = EINVAL;
	}

	/* NOTE(review): no goto targets this label in the visible code */
done:
	return (rval);

} /* emlxs_ioctl() */



/*
 *
 * Device Driver Common Routines
 *
 */

/* emlxs_pm_lock must be held for this call */
/*
 * emlxs_hba_resume
 *
 * Clears EMLXS_PM_SUSPENDED and powers the adapter back up.
 * Returns DDI_SUCCESS or DDI_FAILURE (flag restored on failure).
 */
static int
emlxs_hba_resume(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);

	/* Nothing to do if not suspended */
	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
		return (DDI_SUCCESS);
	}

	hba->pm_state &= ~EMLXS_PM_SUSPENDED;

	/* Take the adapter online */
	if (emlxs_power_up(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
		    "Unable to take adapter online.");

		hba->pm_state |= EMLXS_PM_SUSPENDED;

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

} /* emlxs_hba_resume() */


/* emlxs_pm_lock must be held for this call */
/*
 * emlxs_hba_suspend
 *
 * Sets EMLXS_PM_SUSPENDED and powers the adapter down.
 * Returns DDI_SUCCESS or DDI_FAILURE (flag cleared on failure).
 */
static int
emlxs_hba_suspend(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);

	/* Nothing to do if already suspended */
	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
		return (DDI_SUCCESS);
	}

	hba->pm_state |= EMLXS_PM_SUSPENDED;

	/* Take the adapter offline */
	if (emlxs_power_down(hba)) {
		hba->pm_state &= ~EMLXS_PM_SUSPENDED;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
		    "Unable to take adapter offline.");

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

} /* emlxs_hba_suspend() */



/*
 * emlxs_lock_init
 *
 * Creates every mutex, condition variable and rwlock used by this
 * HBA instance, naming each after the driver instance (and vpi for
 * virtual ports). Interrupt-level locks are initialized with
 * hba->intr_arg as the interrupt cookie.
 */
static void
emlxs_lock_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	int32_t ddiinst;
	char buf[64];
	uint32_t i;

	ddiinst = hba->ddiinst;

	/* Initialize the power management */
	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&hba->pm_lock, buf, MUTEX_DRIVER, (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);

	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);

	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);

	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_RINGTX_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	/* One command-ring lock and one fctab lock per ring */
	for (i = 0; i < MAX_RINGS; i++) {
		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME,
		    ddiinst, i);
		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
		    (void *)hba->intr_arg);

		(void) sprintf(buf, "%s%d_fctab%d_lock mutex", DRIVER_NAME,
		    ddiinst, i);
		mutex_init(&EMLXS_FCTAB_LOCK(i), buf, MUTEX_DRIVER,
		    (void *)hba->intr_arg);
	}

	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

#ifdef DUMP_SUPPORT
	(void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);
#endif /* DUMP_SUPPORT */

	/* Create per port locks */
	/*
	 * NOTE(review): EMLXS_PKT_LOCK/EMLXS_PKT_CV/EMLXS_UB_LOCK appear to
	 * expand through the local 'port' variable — confirm in macro defs.
	 */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);

		if (i == 0) {
			/* Physical port: no vpi suffix in the lock name */
			(void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME,
			    ddiinst);
			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);

			(void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME,
			    ddiinst);
			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);

			(void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME,
			    ddiinst);
			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);
		} else {
			/* Virtual ports: include the vpi in the lock name */
			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
			    DRIVER_NAME, ddiinst, port->vpi);
			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);

			(void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME,
			    ddiinst, port->vpi);
			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);

			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
			    DRIVER_NAME, ddiinst, port->vpi);
			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);
		}
	}

	return;

} /* emlxs_lock_init() */



/*
 * emlxs_lock_destroy
 *
 * Destroys every lock/cv created by emlxs_lock_init() for this
 * HBA instance, including the per-port locks.
 */
static void
emlxs_lock_destroy(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;

	mutex_destroy(&EMLXS_TIMER_LOCK);
	cv_destroy(&hba->timer_lock_cv);

	mutex_destroy(&EMLXS_PORT_LOCK);

	cv_destroy(&EMLXS_MBOX_CV);
	cv_destroy(&EMLXS_LINKUP_CV);

	mutex_destroy(&EMLXS_LINKUP_LOCK);
	mutex_destroy(&EMLXS_MBOX_LOCK);

	mutex_destroy(&EMLXS_RINGTX_LOCK);

	for (i = 0; i < MAX_RINGS; i++) {
		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
		mutex_destroy(&EMLXS_FCTAB_LOCK(i));
	}

	mutex_destroy(&EMLXS_MEMGET_LOCK);
	mutex_destroy(&EMLXS_MEMPUT_LOCK);
	mutex_destroy(&EMLXS_IOCTL_LOCK);
	mutex_destroy(&hba->pm_lock);

#ifdef DUMP_SUPPORT
	mutex_destroy(&EMLXS_DUMP_LOCK);
#endif /* DUMP_SUPPORT */

	/* Destroy per port locks */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);
		rw_destroy(&port->node_rwlock);
		mutex_destroy(&EMLXS_PKT_LOCK);
		cv_destroy(&EMLXS_PKT_CV);
		mutex_destroy(&EMLXS_UB_LOCK);
	}

	return;

} /* emlxs_lock_destroy() */


/* init_flag values */
/* Each bit records one attach step so emlxs_driver_remove can undo it */
#define	ATTACH_SOFT_STATE	0x00000001
#define	ATTACH_FCA_TRAN		0x00000002
#define	ATTACH_HBA		0x00000004
#define	ATTACH_LOG		0x00000008
#define	ATTACH_MAP_BUS		0x00000010
#define	ATTACH_INTR_INIT	0x00000020
#define	ATTACH_PROP		0x00000040
#define	ATTACH_LOCK		0x00000080
#define	ATTACH_THREAD		0x00000100
#define	ATTACH_INTR_ADD		0x00000200
#define	ATTACH_ONLINE		0x00000400
#define	ATTACH_NODE		0x00000800
#define	ATTACH_FCT		0x00001000
#define	ATTACH_FCA		0x00002000
#define	ATTACH_KSTAT		0x00004000
#define	ATTACH_DHCHAP		0x00008000
#define	ATTACH_FM		0x00010000
#define	ATTACH_MAP_SLI		0x00020000

/*
 * emlxs_driver_remove
 *
 * Tears down an instance, undoing exactly the attach steps recorded
 * in init_flag. Used both by detach and by attach failure cleanup;
 * 'failed' marks the instance slot as permanently failed (-1).
 */
static void
emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
{
	emlxs_hba_t *hba = NULL;
	int ddiinst;

	ddiinst = ddi_get_instance(dip);

	if (init_flag & ATTACH_HBA) {
		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

		if (init_flag & ATTACH_ONLINE) {
			(void) emlxs_offline(hba);
		}

		if (init_flag & ATTACH_INTR_ADD) {
			(void) EMLXS_INTR_REMOVE(hba);
		}
#ifdef SFCT_SUPPORT
		if (init_flag & ATTACH_FCT) {
			emlxs_fct_detach(hba);
		}
#endif /* SFCT_SUPPORT */

#ifdef DHCHAP_SUPPORT
		if (init_flag & ATTACH_DHCHAP) {
			emlxs_dhc_detach(hba);
		}
#endif /* DHCHAP_SUPPORT */

		if (init_flag & ATTACH_KSTAT) {
			kstat_delete(hba->kstat);
		}

		if (init_flag & ATTACH_FCA) {
			emlxs_fca_detach(hba);
		}

		if (init_flag & ATTACH_NODE) {
			(void) ddi_remove_minor_node(hba->dip, "devctl");
		}

		if (init_flag & ATTACH_THREAD) {
			emlxs_thread_destroy(&hba->iodone_thread);
		}

		if (init_flag & ATTACH_PROP) {
			(void) ddi_prop_remove_all(hba->dip);
		}

		if (init_flag & ATTACH_LOCK) {
			emlxs_lock_destroy(hba);
		}

		if (init_flag & ATTACH_INTR_INIT) {
			(void) EMLXS_INTR_UNINIT(hba);
		}

		if (init_flag & ATTACH_MAP_BUS) {
			emlxs_unmap_bus(hba);
		}

		if (init_flag & ATTACH_MAP_SLI) {
			emlxs_sli_unmap_hdw(hba);
		}

#ifdef FMA_SUPPORT
		if (init_flag & ATTACH_FM) {
			emlxs_fm_fini(hba);
		}
#endif /* FMA_SUPPORT */

		if (init_flag & ATTACH_LOG) {
			(void) emlxs_msg_log_destroy(hba);
		}

		if (init_flag & ATTACH_FCA_TRAN) {
			(void) ddi_set_driver_private(hba->dip, NULL);
			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
			hba->fca_tran = NULL;
		}

		if (init_flag & ATTACH_HBA) {
			emlxs_device.log[hba->emlxinst] = 0;
			/* -1 marks a failed instance so attach won't retry */
			emlxs_device.hba[hba->emlxinst] =
			    (emlxs_hba_t *)((unsigned long)((failed) ?
			    -1 : 0));

#ifdef DUMP_SUPPORT
			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
#endif /* DUMP_SUPPORT */

		}
	}

	if (init_flag & ATTACH_SOFT_STATE) {
		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
	}

	return;

} /* emlxs_driver_remove() */



/* This determines which ports will be initiator mode */
/*
 * emlxs_fca_init
 *
 * Attaches this instance to the SFS (leadville) initiator stack.
 * If SFS is absent or the FCA interface version does not match,
 * initiator mode is disabled on the physical and all virtual ports.
 */
static void
emlxs_fca_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	uint32_t i;

	if (!hba->ini_mode) {
		return;
	}
#ifdef MODSYM_SUPPORT
	/* Open SFS */
	(void) emlxs_fca_modopen();
#endif /* MODSYM_SUPPORT */

	/* Check if SFS present */
	if (((void *)MODSYM(fc_fca_init) == NULL) ||
	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS not present. Initiator mode disabled.");
		goto failed;
	}

	/* Setup devops for SFS */
	MODSYM(fc_fca_init)(&emlxs_ops);

	/* Check if our SFS driver interface matches the current SFS stack */
	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS/FCA version mismatch. FCA=0x%x",
		    hba->fca_tran->fca_version);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS present. Initiator mode disabled.");

		goto failed;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "SFS present. Initiator mode enabled.");

	return;

failed:

	/* Disable initiator mode everywhere on any failure above */
	hba->ini_mode = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->ini_mode = 0;
	}

	return;

} /* emlxs_fca_init() */


/* This determines which ports will be initiator or target mode */
/*
 * emlxs_set_mode
 *
 * Chooses initiator vs. target mode for the physical port from the
 * CFG_TARGET_MODE setting (SFCT builds only; otherwise initiator),
 * propagates the mode to all virtual ports, then initializes the
 * selected stack (SFS and/or SFCT).
 */
static void
emlxs_set_mode(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	uint32_t i;
	uint32_t tgt_mode = 0;

#ifdef SFCT_SUPPORT
	emlxs_config_t *cfg;

	cfg = &hba->config[CFG_TARGET_MODE];
	tgt_mode = cfg->current;

	port->fct_flags = 0;
#endif /* SFCT_SUPPORT */

	/* Initialize physical port */
	if (tgt_mode) {
		hba->tgt_mode = 1;
		hba->ini_mode = 0;

		port->tgt_mode = 1;
		port->ini_mode = 0;
	} else {
		hba->tgt_mode = 0;
		hba->ini_mode = 1;

		port->tgt_mode = 0;
		port->ini_mode = 1;
	}

	/* Initialize virtual ports */
	/* Virtual ports take on the mode of the parent physical port */
	for (i = 1; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

#ifdef SFCT_SUPPORT
		vport->fct_flags = 0;
#endif /* SFCT_SUPPORT */

		vport->ini_mode = port->ini_mode;
		vport->tgt_mode = port->tgt_mode;
	}

	/* Check if initiator mode is requested */
	if (hba->ini_mode) {
		emlxs_fca_init(hba);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Initiator mode not enabled.");
	}

#ifdef SFCT_SUPPORT
	/* Check if target mode is requested */
	if (hba->tgt_mode) {
		emlxs_fct_init(hba);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Target mode not enabled.");
	}
#endif /* SFCT_SUPPORT */

	return;

} /* emlxs_set_mode() */



/*
 * emlxs_fca_attach
 *
 * Fills in the instance-specific fields of the fc_fca_tran_t handed
 * to the SFS stack (interrupt cookie, command throttle, permanent
 * port WWN).
 */
static void
emlxs_fca_attach(emlxs_hba_t *hba)
{
	/* Update our transport structure */
	hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
	hba->fca_tran->fca_cmd_max = hba->io_throttle;

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	/* Newer transport revisions carry the permanent port WWN */
	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
	    sizeof (NAME_TYPE));
#endif /* >= EMLXS_MODREV5 */

	return;

} /* emlxs_fca_attach() */


/*
 * emlxs_fca_detach
 *
 * Detaches this instance from the SFS stack (if initiator mode was
 * active) and clears initiator mode on the physical and all virtual
 * ports.
 */
static void
emlxs_fca_detach(emlxs_hba_t *hba)
{
	uint32_t i;
	emlxs_port_t *vport;

	if (hba->ini_mode) {
		/* fc_fca_detach may be unresolved if SFS is not loaded */
		if ((void *)MODSYM(fc_fca_detach) != NULL) {
			MODSYM(fc_fca_detach)(hba->dip);
		}

		hba->ini_mode = 0;

		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);
			vport->ini_mode = 0;
		}
	}

	return;

} /* emlxs_fca_detach() */



/*
 * emlxs_drv_banner
 *
 * Logs the attach-time banner for this instance: driver version,
 * adapter model, firmware/boot versions, interrupt and NPIV modes,
 * and the WWPN/WWNN of the physical and configured virtual ports.
 * Finishes by announcing the device via ddi_report_dev().
 */
static void
emlxs_drv_banner(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;
	char msi_mode[16];
	char npiv_mode[16];
	emlxs_vpd_t *vpd = &VPD;
	emlxs_config_t *cfg = &CFG;
	uint8_t *wwpn;
	uint8_t *wwnn;

	/* Display firmware library one time */
	if (emlxs_instance_count == 1) {
		emlxs_fw_show(hba);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
	    emlxs_revision);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
	    hba->model_info.device_id, hba->model_info.ssdid,
	    hba->model_info.id);

#ifdef EMLXS_I386

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
	    vpd->boot_version);

#else	/* EMLXS_SPARC */

	/* SPARC additionally reports the FCode version */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);

#endif	/* EMLXS_I386 */

	/* Default to legacy INTx until MSI state says otherwise */
	(void) strcpy(msi_mode, " INTX:1");

#ifdef MSI_SUPPORT
	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
		switch (hba->intr_type) {
		case DDI_INTR_TYPE_FIXED:
			(void) strcpy(msi_mode, " MSI:0");
			break;

		case DDI_INTR_TYPE_MSI:
			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
			break;

		case DDI_INTR_TYPE_MSIX:
			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
			break;
		}
	}
#endif

	(void) strcpy(npiv_mode, "");

#ifdef SLI3_SUPPORT
	if (hba->flag & FC_NPIV_ENABLED) {
		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max);
	} else {
		(void) strcpy(npiv_mode, " NPIV:0");
	}
#endif /* SLI3_SUPPORT */


	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "SLI:%d%s%s%s%s",
	    hba->sli_mode, msi_mode, npiv_mode,
	    ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? " FCT":""));

	wwpn = (uint8_t *)&hba->wwpn;
	wwnn = (uint8_t *)&hba->wwnn;
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
	    wwnn[6], wwnn[7]);

#ifdef SLI3_SUPPORT
	/* Report each configured virtual port's WWNs as well */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_CONFIG)) {
			continue;
		}

		wwpn = (uint8_t *)&port->wwpn;
		wwnn = (uint8_t *)&port->wwnn;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
	}
	/* Restore 'port' to the physical port after the loop */
	port = &PPORT;

#ifdef NPIV_SUPPORT
	/*
	 * No dependency for Restricted login parameter.
	 */
	if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) {
		port->flag |= EMLXS_PORT_RESTRICTED;
	} else {
		port->flag &= ~EMLXS_PORT_RESTRICTED;
	}
#endif /* NPIV_SUPPORT */

#endif /* SLI3_SUPPORT */

	/*
	 * Announce the device: ddi_report_dev() prints a banner at boot time,
	 * announcing the device pointed to by dip.
	 */
	(void) ddi_report_dev(hba->dip);

	return;

} /* emlxs_drv_banner() */


/*
 * emlxs_get_fcode_version
 *
 * Reads the "fcode-version" property from the device node into
 * vpd->fcode_version, or stores "none" if the property is absent.
 */
extern void
emlxs_get_fcode_version(emlxs_hba_t *hba)
{
	emlxs_vpd_t *vpd = &VPD;
	char *prop_str;
	int status;

	/* Setup fcode version property */
	prop_str = NULL;
	status =
	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
	    "fcode-version", (char **)&prop_str);

	if (status == DDI_PROP_SUCCESS) {
		/*
		 * NOTE(review): bcopy() does not NUL-terminate; this appears
		 * to rely on vpd->fcode_version being pre-zeroed — confirm.
		 */
		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
		(void) ddi_prop_free((void *)prop_str);
	} else {
		(void) strcpy(vpd->fcode_version, "none");
	}

	return;

} /* emlxs_get_fcode_version() */


static int
emlxs_hba_attach(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_config_t *cfg;
	char *prop_str;
	int ddiinst;
	int32_t emlxinst;
	int status;
	uint32_t rval;
	uint32_t init_flag = 0;
	char local_pm_components[32];
#ifdef EMLXS_I386
	uint32_t i;
#endif /* EMLXS_I386 */

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_add_instance(ddiinst);

	if (emlxinst >= MAX_FC_BRDS) {
		cmn_err(CE_WARN,
		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. 
" 5893 "inst=%x", DRIVER_NAME, ddiinst); 5894 return (DDI_FAILURE); 5895 } 5896 5897 if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) { 5898 return (DDI_FAILURE); 5899 } 5900 5901 if (emlxs_device.hba[emlxinst]) { 5902 return (DDI_SUCCESS); 5903 } 5904 5905 /* An adapter can accidentally be plugged into a slave-only PCI slot */ 5906 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 5907 cmn_err(CE_WARN, 5908 "?%s%d: fca_hba_attach failed. Device in slave-only slot.", 5909 DRIVER_NAME, ddiinst); 5910 return (DDI_FAILURE); 5911 } 5912 5913 /* Allocate emlxs_dev_ctl structure. */ 5914 if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) { 5915 cmn_err(CE_WARN, 5916 "?%s%d: fca_hba_attach failed. Unable to allocate soft " 5917 "state.", DRIVER_NAME, ddiinst); 5918 return (DDI_FAILURE); 5919 } 5920 init_flag |= ATTACH_SOFT_STATE; 5921 5922 if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state, 5923 ddiinst)) == NULL) { 5924 cmn_err(CE_WARN, 5925 "?%s%d: fca_hba_attach failed. 
Unable to get soft state.", 5926 DRIVER_NAME, ddiinst); 5927 goto failed; 5928 } 5929 bzero((char *)hba, sizeof (emlxs_hba_t)); 5930 5931 emlxs_device.hba[emlxinst] = hba; 5932 emlxs_device.log[emlxinst] = &hba->log; 5933 5934 #ifdef DUMP_SUPPORT 5935 emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile; 5936 emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile; 5937 emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile; 5938 #endif /* DUMP_SUPPORT */ 5939 5940 hba->dip = dip; 5941 hba->emlxinst = emlxinst; 5942 hba->ddiinst = ddiinst; 5943 hba->ini_mode = 0; 5944 hba->tgt_mode = 0; 5945 hba->mem_bpl_size = MEM_BPL_SIZE; 5946 5947 init_flag |= ATTACH_HBA; 5948 5949 /* Enable the physical port on this HBA */ 5950 port = &PPORT; 5951 port->hba = hba; 5952 port->vpi = 0; 5953 port->flag |= EMLXS_PORT_ENABLE; 5954 5955 /* Allocate a transport structure */ 5956 hba->fca_tran = 5957 (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP); 5958 if (hba->fca_tran == NULL) { 5959 cmn_err(CE_WARN, 5960 "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran " 5961 "memory.", DRIVER_NAME, ddiinst); 5962 goto failed; 5963 } 5964 bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran, 5965 sizeof (fc_fca_tran_t)); 5966 5967 /* Set the transport structure pointer in our dip */ 5968 /* SFS may panic if we are in target only mode */ 5969 /* We will update the transport structure later */ 5970 (void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran); 5971 init_flag |= ATTACH_FCA_TRAN; 5972 5973 /* Perform driver integrity check */ 5974 rval = emlxs_integrity_check(hba); 5975 if (rval) { 5976 cmn_err(CE_WARN, 5977 "?%s%d: fca_hba_attach failed. Driver integrity check " 5978 "failed. 
%d error(s) found.", DRIVER_NAME, ddiinst, rval); 5979 goto failed; 5980 } 5981 5982 cfg = &CFG; 5983 5984 bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg)); 5985 #ifdef MSI_SUPPORT 5986 if ((void *)&ddi_intr_get_supported_types != NULL) { 5987 hba->intr_flags |= EMLXS_MSI_ENABLED; 5988 } 5989 #endif /* MSI_SUPPORT */ 5990 5991 5992 /* Create the msg log file */ 5993 if (emlxs_msg_log_create(hba) == 0) { 5994 cmn_err(CE_WARN, 5995 "?%s%d: fca_hba_attach failed. Unable to create message " 5996 "log", DRIVER_NAME, ddiinst); 5997 goto failed; 5998 5999 } 6000 init_flag |= ATTACH_LOG; 6001 6002 /* We can begin to use EMLXS_MSGF from this point on */ 6003 6004 /* 6005 * Find the I/O bus type If it is not a SBUS card, 6006 * then it is a PCI card. Default is PCI_FC (0). 6007 */ 6008 prop_str = NULL; 6009 status = ddi_prop_lookup_string(DDI_DEV_T_ANY, 6010 (dev_info_t *)dip, 0, "name", (char **)&prop_str); 6011 6012 if (status == DDI_PROP_SUCCESS) { 6013 if (strncmp(prop_str, "lpfs", 4) == 0) { 6014 hba->bus_type = SBUS_FC; 6015 } 6016 6017 (void) ddi_prop_free((void *)prop_str); 6018 } 6019 #ifdef EMLXS_I386 6020 /* Update BPL size based on max_xfer_size */ 6021 i = cfg[CFG_MAX_XFER_SIZE].current; 6022 if (i > 688128) { 6023 /* 688128 = (((2048 / 12) - 2) * 4096) */ 6024 hba->mem_bpl_size = 4096; 6025 } else if (i > 339968) { 6026 /* 339968 = (((1024 / 12) - 2) * 4096) */ 6027 hba->mem_bpl_size = 2048; 6028 } else { 6029 hba->mem_bpl_size = 1024; 6030 } 6031 6032 /* Update dma_attr_sgllen based on BPL size */ 6033 i = BPL_TO_SGLLEN(hba->mem_bpl_size); 6034 emlxs_dma_attr.dma_attr_sgllen = i; 6035 emlxs_dma_attr_ro.dma_attr_sgllen = i; 6036 emlxs_dma_attr_fcip_rsp.dma_attr_sgllen = i; 6037 #endif /* EMLXS_I386 */ 6038 6039 /* 6040 * Copy DDS from the config method and update configuration parameters 6041 */ 6042 (void) emlxs_get_props(hba); 6043 6044 #ifdef FMA_SUPPORT 6045 hba->fm_caps = cfg[CFG_FM_CAPS].current; 6046 6047 emlxs_fm_init(hba); 6048 6049 
init_flag |= ATTACH_FM; 6050 #endif /* FMA_SUPPORT */ 6051 6052 if (emlxs_map_bus(hba)) { 6053 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6054 "Unable to map memory"); 6055 goto failed; 6056 6057 } 6058 init_flag |= ATTACH_MAP_BUS; 6059 6060 /* Attempt to identify the adapter */ 6061 rval = emlxs_init_adapter_info(hba); 6062 6063 if (rval == 0) { 6064 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6065 "Unable to get adapter info. Id:%d Device id:0x%x " 6066 "Model:%s", hba->model_info.id, 6067 hba->model_info.device_id, hba->model_info.model); 6068 goto failed; 6069 } 6070 6071 /* Check if adapter is not supported */ 6072 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) { 6073 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6074 "Unsupported adapter found. Id:%d Device id:0x%x " 6075 "SSDID:0x%x Model:%s", hba->model_info.id, 6076 hba->model_info.device_id, 6077 hba->model_info.ssdid, hba->model_info.model); 6078 goto failed; 6079 } 6080 6081 if (emlxs_sli_map_hdw(hba)) { 6082 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6083 "Unable to map memory"); 6084 goto failed; 6085 6086 } 6087 init_flag |= ATTACH_MAP_SLI; 6088 6089 /* Initialize the interrupts. 
But don't add them yet */ 6090 status = EMLXS_INTR_INIT(hba, 0); 6091 if (status != DDI_SUCCESS) { 6092 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6093 "Unable to initalize interrupt(s)."); 6094 goto failed; 6095 6096 } 6097 init_flag |= ATTACH_INTR_INIT; 6098 6099 /* Initialize LOCKs */ 6100 emlxs_lock_init(hba); 6101 init_flag |= ATTACH_LOCK; 6102 6103 /* Initialize the power management */ 6104 mutex_enter(&hba->pm_lock); 6105 hba->pm_state = EMLXS_PM_IN_ATTACH; 6106 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 6107 hba->pm_busy = 0; 6108 #ifdef IDLE_TIMER 6109 hba->pm_active = 1; 6110 hba->pm_idle_timer = 0; 6111 #endif /* IDLE_TIMER */ 6112 mutex_exit(&hba->pm_lock); 6113 6114 /* Set the pm component name */ 6115 (void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME, 6116 ddiinst); 6117 emlxs_pm_components[0] = local_pm_components; 6118 6119 /* Check if power management support is enabled */ 6120 if (cfg[CFG_PM_SUPPORT].current) { 6121 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip, 6122 "pm-components", emlxs_pm_components, 6123 sizeof (emlxs_pm_components) / 6124 sizeof (emlxs_pm_components[0])) != 6125 DDI_PROP_SUCCESS) { 6126 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6127 "Unable to create pm components."); 6128 goto failed; 6129 } 6130 } 6131 6132 /* Needed for suspend and resume support */ 6133 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state", 6134 "needs-suspend-resume"); 6135 init_flag |= ATTACH_PROP; 6136 6137 emlxs_thread_create(hba, &hba->iodone_thread); 6138 init_flag |= ATTACH_THREAD; 6139 6140 /* Setup initiator / target ports */ 6141 emlxs_set_mode(hba); 6142 6143 /* If driver did not attach to either stack, */ 6144 /* then driver attach failed */ 6145 if (!hba->tgt_mode && !hba->ini_mode) { 6146 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6147 "Driver interfaces not enabled."); 6148 goto failed; 6149 } 6150 6151 /* 6152 * Initialize HBA 6153 */ 6154 6155 /* Set initial state */ 6156 
mutex_enter(&EMLXS_PORT_LOCK); 6157 emlxs_diag_state = DDI_OFFDI; 6158 hba->flag |= FC_OFFLINE_MODE; 6159 hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE); 6160 mutex_exit(&EMLXS_PORT_LOCK); 6161 6162 if (status = emlxs_online(hba)) { 6163 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6164 "Unable to initialize adapter."); 6165 goto failed; 6166 } 6167 init_flag |= ATTACH_ONLINE; 6168 6169 /* This is to ensure that the model property is properly set */ 6170 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model", 6171 hba->model_info.model); 6172 6173 /* Create the device node. */ 6174 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) == 6175 DDI_FAILURE) { 6176 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6177 "Unable to create device node."); 6178 goto failed; 6179 } 6180 init_flag |= ATTACH_NODE; 6181 6182 /* Attach initiator now */ 6183 /* This must come after emlxs_online() */ 6184 emlxs_fca_attach(hba); 6185 init_flag |= ATTACH_FCA; 6186 6187 /* Initialize kstat information */ 6188 hba->kstat = kstat_create(DRIVER_NAME, 6189 ddiinst, "statistics", "controller", 6190 KSTAT_TYPE_RAW, sizeof (emlxs_stats_t), 6191 KSTAT_FLAG_VIRTUAL); 6192 6193 if (hba->kstat == NULL) { 6194 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 6195 "kstat_create failed."); 6196 } else { 6197 hba->kstat->ks_data = (void *)&hba->stats; 6198 kstat_install(hba->kstat); 6199 init_flag |= ATTACH_KSTAT; 6200 } 6201 6202 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4) 6203 /* Setup virtual port properties */ 6204 emlxs_read_vport_prop(hba); 6205 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */ 6206 6207 6208 #ifdef DHCHAP_SUPPORT 6209 emlxs_dhc_attach(hba); 6210 init_flag |= ATTACH_DHCHAP; 6211 #endif /* DHCHAP_SUPPORT */ 6212 6213 /* Display the driver banner now */ 6214 emlxs_drv_banner(hba); 6215 6216 /* Raise the power level */ 6217 6218 /* 6219 * This will not execute emlxs_hba_resume because 6220 * 
EMLXS_PM_IN_ATTACH is set
 */
	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
		/* Set power up anyway. This should not happen! */
		mutex_enter(&hba->pm_lock);
		hba->pm_level = EMLXS_PM_ADAPTER_UP;
		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
		mutex_exit(&hba->pm_lock);
	} else {
		mutex_enter(&hba->pm_lock);
		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
		mutex_exit(&hba->pm_lock);
	}

#ifdef SFCT_SUPPORT
	/* Do this last */
	emlxs_fct_attach(hba);
	init_flag |= ATTACH_FCT;
#endif /* SFCT_SUPPORT */

	return (DDI_SUCCESS);

failed:

	/* init_flag records how far attach got; remove only what was done */
	emlxs_driver_remove(dip, init_flag, 1);

	return (DDI_FAILURE);

} /* emlxs_hba_attach() */


/*
 * emlxs_hba_detach - DDI detach handler for one HBA instance.
 *
 * Marks the instance as detaching (so power transitions below do not
 * trigger a driver suspend), lowers power, takes the adapter offline,
 * then tears everything down via emlxs_driver_remove().
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure the detach-in-progress
 * state is backed out so the instance remains usable.
 */
static int
emlxs_hba_detach(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;
	/* All bits set: every attach stage is assumed done and is removed */
	uint32_t init_flag = (uint32_t)-1;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);

	mutex_enter(&hba->pm_lock);
	hba->pm_state |= EMLXS_PM_IN_DETACH;
	mutex_exit(&hba->pm_lock);

	/* Lower the power level */
	/*
	 * This will not suspend the driver since the
	 * EMLXS_PM_IN_DETACH has been set
	 */
	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "Unable to lower power.");

		/* Back out the detach-in-progress marker */
		mutex_enter(&hba->pm_lock);
		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
		mutex_exit(&hba->pm_lock);

		return (DDI_FAILURE);
	}

	/* Take the adapter offline first, if not already */
	if (emlxs_offline(hba) != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "Unable to take adapter offline.");

		mutex_enter(&hba->pm_lock);
		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
		mutex_exit(&hba->pm_lock);

		/* Restore the power level lowered above */
		(void)
emlxs_pm_raise_power(dip);

		return (DDI_FAILURE);
	}
	init_flag &= ~ATTACH_ONLINE;

	/* Remove the driver instance */
	emlxs_driver_remove(dip, init_flag, 0);

	return (DDI_SUCCESS);

} /* emlxs_hba_detach() */


/*
 * emlxs_map_bus - Map the adapter's PCI configuration register space.
 *
 * For SBUS-attached adapters (bus_type == SBUS_FC) both the DFLY PCI
 * config space and the TITAN bridge PCI config space are mapped;
 * otherwise only the standard PCI config space is mapped.  Handles that
 * are already non-zero are left alone, so the routine is re-entrant for
 * partially mapped state.  Returns 0 on success or ENOMEM on failure
 * (after unmapping whatever was mapped).
 */
extern int
emlxs_map_bus(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	dev_info_t *dip;
	ddi_device_acc_attr_t dev_attr;
	int status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {
		if (hba->pci_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_PCI_CFG_RINDEX,
			    (caddr_t *)&hba->pci_addr,
			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup PCI failed. "
				    "status=%x", status);
				goto failed;
			}
		}

		if (hba->sbus_pci_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_TITAN_PCI_CFG_RINDEX,
			    (caddr_t *)&hba->sbus_pci_addr,
			    0, 0, &dev_attr, &hba->sbus_pci_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN PCI "
				    "failed. status=%x", status);
				goto failed;
			}
		}

	} else {	/* ****** PCI ****** */

		if (hba->pci_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    PCI_CFG_RINDEX,
			    (caddr_t *)&hba->pci_addr,
			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup PCI failed. 
"
				    "status=%x", status);
				goto failed;
			}
		}
#ifdef EMLXS_I386
		/* Setting up PCI configure space */
		(void) ddi_put16(hba->pci_acc_handle,
		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
		    CMD_CFG_VALUE | CMD_IO_ENBL);
#endif	/* EMLXS_I386 */

	}
	return (0);

failed:

	emlxs_unmap_bus(hba);
	return (ENOMEM);

} /* emlxs_map_bus() */


/*
 * emlxs_unmap_bus - Release the register mappings made by emlxs_map_bus().
 *
 * Frees whichever of the PCI and SBUS-PCI access handles are set and
 * zeros them, so the routine is safe to call on a partially mapped HBA.
 */
extern void
emlxs_unmap_bus(emlxs_hba_t *hba)
{
	if (hba->pci_acc_handle) {
		(void) ddi_regs_map_free(&hba->pci_acc_handle);
		hba->pci_acc_handle = 0;
	}

	if (hba->sbus_pci_handle) {
		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
		hba->sbus_pci_handle = 0;
	}

	return;

} /* emlxs_unmap_bus() */


/*
 * emlxs_get_props - Load all configuration parameters for this instance.
 *
 * For each entry in hba->config[]: start from the compiled-in default,
 * let a global driver.conf property override it, then let a per-instance
 * "<drv><inst>-<name>" property override that, and finally clamp/validate
 * the result through emlxs_check_parm().  Always returns 0.
 */
static int
emlxs_get_props(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg;
	uint32_t i;
	char string[256];
	uint32_t new_value;

	/* Initialize each parameter */
	for (i = 0; i < NUM_CFG_PARAM; i++) {
		cfg = &hba->config[i];

		/* Ensure strings are terminated */
		cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
		cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0;

		/* Set the current value to the default value */
		new_value = cfg->def;

		/* First check for the global setting */
		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    (void *)hba->dip, DDI_PROP_DONTPASS,
		    cfg->string, new_value);

		/* Now check for the per adapter ddiinst setting */
		(void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst,
		    cfg->string);

		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);

		/* Now check the parameter */
		cfg->current = emlxs_check_parm(hba, i, new_value);
	}

	return (0);

} /* emlxs_get_props() */


/*
 * emlxs_check_parm - Validate/clamp a proposed config parameter value.
 * Returns the value to actually use (may differ from new_value).
 */
extern uint32_t
emlxs_check_parm(emlxs_hba_t *hba, uint32_t
index, uint32_t new_value) 6441 { 6442 emlxs_port_t *port = &PPORT; 6443 uint32_t i; 6444 emlxs_config_t *cfg; 6445 emlxs_vpd_t *vpd = &VPD; 6446 6447 if (index > NUM_CFG_PARAM) { 6448 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6449 "emlxs_check_parm failed. Invalid index = %d", index); 6450 6451 return (new_value); 6452 } 6453 6454 cfg = &hba->config[index]; 6455 6456 if (new_value > cfg->hi) { 6457 new_value = cfg->def; 6458 } else if (new_value < cfg->low) { 6459 new_value = cfg->def; 6460 } 6461 6462 /* Perform additional checks */ 6463 switch (index) { 6464 #ifdef NPIV_SUPPORT 6465 case CFG_NPIV_ENABLE: 6466 if (hba->tgt_mode) { 6467 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6468 "enable-npiv: Not supported in target mode. " 6469 "Disabling."); 6470 6471 new_value = 0; 6472 } 6473 break; 6474 #endif /* NPIV_SUPPORT */ 6475 6476 #ifdef DHCHAP_SUPPORT 6477 case CFG_AUTH_ENABLE: 6478 if (hba->tgt_mode) { 6479 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6480 "enable-auth: Not supported in target mode. " 6481 "Disabling."); 6482 6483 new_value = 0; 6484 } 6485 break; 6486 #endif /* DHCHAP_SUPPORT */ 6487 6488 case CFG_NUM_NODES: 6489 switch (new_value) { 6490 case 1: 6491 case 2: 6492 /* Must have at least 3 if not 0 */ 6493 return (3); 6494 6495 default: 6496 break; 6497 } 6498 break; 6499 6500 case CFG_LINK_SPEED: 6501 if (vpd->link_speed) { 6502 switch (new_value) { 6503 case 0: 6504 break; 6505 6506 case 1: 6507 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) { 6508 new_value = 0; 6509 6510 EMLXS_MSGF(EMLXS_CONTEXT, 6511 &emlxs_init_msg, 6512 "link-speed: 1Gb not supported " 6513 "by adapter. Switching to auto " 6514 "detect."); 6515 } 6516 break; 6517 6518 case 2: 6519 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) { 6520 new_value = 0; 6521 6522 EMLXS_MSGF(EMLXS_CONTEXT, 6523 &emlxs_init_msg, 6524 "link-speed: 2Gb not supported " 6525 "by adapter. 
Switching to auto " 6526 "detect."); 6527 } 6528 break; 6529 case 4: 6530 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) { 6531 new_value = 0; 6532 6533 EMLXS_MSGF(EMLXS_CONTEXT, 6534 &emlxs_init_msg, 6535 "link-speed: 4Gb not supported " 6536 "by adapter. Switching to auto " 6537 "detect."); 6538 } 6539 break; 6540 6541 case 8: 6542 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) { 6543 new_value = 0; 6544 6545 EMLXS_MSGF(EMLXS_CONTEXT, 6546 &emlxs_init_msg, 6547 "link-speed: 8Gb not supported " 6548 "by adapter. Switching to auto " 6549 "detect."); 6550 } 6551 break; 6552 6553 case 10: 6554 if (!(vpd->link_speed & LMT_10GB_CAPABLE)) { 6555 new_value = 0; 6556 6557 EMLXS_MSGF(EMLXS_CONTEXT, 6558 &emlxs_init_msg, 6559 "link-speed: 10Gb not supported " 6560 "by adapter. Switching to auto " 6561 "detect."); 6562 } 6563 break; 6564 6565 default: 6566 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6567 "link-speed: Invalid value=%d provided. " 6568 "Switching to auto detect.", 6569 new_value); 6570 6571 new_value = 0; 6572 } 6573 } else { /* Perform basic validity check */ 6574 6575 /* Perform additional check on link speed */ 6576 switch (new_value) { 6577 case 0: 6578 case 1: 6579 case 2: 6580 case 4: 6581 case 8: 6582 case 10: 6583 /* link-speed is a valid choice */ 6584 break; 6585 6586 default: 6587 new_value = cfg->def; 6588 } 6589 } 6590 break; 6591 6592 case CFG_TOPOLOGY: 6593 /* Perform additional check on topology */ 6594 switch (new_value) { 6595 case 0: 6596 case 2: 6597 case 4: 6598 case 6: 6599 /* topology is a valid choice */ 6600 break; 6601 6602 default: 6603 return (cfg->def); 6604 } 6605 break; 6606 6607 #ifdef DHCHAP_SUPPORT 6608 case CFG_AUTH_TYPE: 6609 { 6610 uint32_t shift; 6611 uint32_t mask; 6612 6613 /* Perform additional check on auth type */ 6614 shift = 12; 6615 mask = 0xF000; 6616 for (i = 0; i < 4; i++) { 6617 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) { 6618 return (cfg->def); 6619 } 6620 6621 shift -= 4; 6622 mask >>= 4; 6623 } 6624 break; 
6625 } 6626 6627 case CFG_AUTH_HASH: 6628 { 6629 uint32_t shift; 6630 uint32_t mask; 6631 6632 /* Perform additional check on auth hash */ 6633 shift = 12; 6634 mask = 0xF000; 6635 for (i = 0; i < 4; i++) { 6636 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) { 6637 return (cfg->def); 6638 } 6639 6640 shift -= 4; 6641 mask >>= 4; 6642 } 6643 break; 6644 } 6645 6646 case CFG_AUTH_GROUP: 6647 { 6648 uint32_t shift; 6649 uint32_t mask; 6650 6651 /* Perform additional check on auth group */ 6652 shift = 28; 6653 mask = 0xF0000000; 6654 for (i = 0; i < 8; i++) { 6655 if (((new_value & mask) >> shift) > 6656 DFC_AUTH_GROUP_MAX) { 6657 return (cfg->def); 6658 } 6659 6660 shift -= 4; 6661 mask >>= 4; 6662 } 6663 break; 6664 } 6665 6666 case CFG_AUTH_INTERVAL: 6667 if (new_value < 10) { 6668 return (10); 6669 } 6670 break; 6671 6672 6673 #endif /* DHCHAP_SUPPORT */ 6674 6675 } /* switch */ 6676 6677 return (new_value); 6678 6679 } /* emlxs_check_parm() */ 6680 6681 6682 extern uint32_t 6683 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 6684 { 6685 emlxs_port_t *port = &PPORT; 6686 emlxs_port_t *vport; 6687 uint32_t vpi; 6688 emlxs_config_t *cfg; 6689 uint32_t old_value; 6690 6691 if (index > NUM_CFG_PARAM) { 6692 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6693 "emlxs_set_parm failed. Invalid index = %d", index); 6694 6695 return ((uint32_t)FC_FAILURE); 6696 } 6697 6698 cfg = &hba->config[index]; 6699 6700 if (!(cfg->flags & PARM_DYNAMIC)) { 6701 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6702 "emlxs_set_parm failed. %s is not dynamic.", cfg->string); 6703 6704 return ((uint32_t)FC_FAILURE); 6705 } 6706 6707 /* Check new value */ 6708 old_value = new_value; 6709 new_value = emlxs_check_parm(hba, index, new_value); 6710 6711 if (old_value != new_value) { 6712 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6713 "emlxs_set_parm: %s invalid. 
0x%x --> 0x%x",
		    cfg->string, old_value, new_value);
	}

	/* Return now if no actual change */
	if (new_value == cfg->current) {
		return (FC_SUCCESS);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
	    cfg->string, cfg->current, new_value);

	old_value = cfg->current;
	cfg->current = new_value;

	/* React to change if needed */
	switch (index) {
	case CFG_PCI_MAX_READ:
		/* Update MXR */
		emlxs_pcix_mxr_update(hba, 1);
		break;

#ifdef SLI3_SUPPORT
	case CFG_SLI_MODE:
		/* Check SLI mode */
		if ((hba->sli_mode == 3) && (new_value == 2)) {
			/* All vports must be disabled first */
			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
				vport = &VPORT(vpi);

				if (vport->flag & EMLXS_PORT_ENABLE) {
					/* Reset current value */
					cfg->current = old_value;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sfs_debug_msg,
					    "emlxs_set_parm failed. %s: vpi=%d "
					    "still enabled. Value restored to "
					    "0x%x.", cfg->string, vpi,
					    old_value);

					return (2);
				}
			}
		}
		break;

#ifdef NPIV_SUPPORT
	case CFG_NPIV_ENABLE:
		/* Check if NPIV is being disabled */
		if ((old_value == 1) && (new_value == 0)) {
			/* All vports must be disabled first */
			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
				vport = &VPORT(vpi);

				if (vport->flag & EMLXS_PORT_ENABLE) {
					/* Reset current value */
					cfg->current = old_value;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sfs_debug_msg,
					    "emlxs_set_parm failed. %s: vpi=%d "
					    "still enabled. Value restored to "
					    "0x%x.", cfg->string, vpi,
					    old_value);

					return (2);
				}
			}
		}

		/* Trigger adapter reset */
		/* (void) emlxs_reset(port, FC_FCA_RESET); */

		break;


	case CFG_VPORT_RESTRICTED:
		/* Propagate the new restriction to every configured vport */
		for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
			vport = &VPORT(vpi);

			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
				continue;
			}

			/* Ports with an explicit override keep their setting */
			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
				continue;
			}

			if (new_value) {
				vport->flag |= EMLXS_PORT_RESTRICTED;
			} else {
				vport->flag &= ~EMLXS_PORT_RESTRICTED;
			}
		}

		break;
#endif /* NPIV_SUPPORT */
#endif /* SLI3_SUPPORT */

#ifdef DHCHAP_SUPPORT
	case CFG_AUTH_ENABLE:
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
		break;

	case CFG_AUTH_TMO:
		hba->auth_cfg.authentication_timeout = cfg->current;
		break;

	case CFG_AUTH_MODE:
		hba->auth_cfg.authentication_mode = cfg->current;
		break;

	case CFG_AUTH_BIDIR:
		hba->auth_cfg.bidirectional = cfg->current;
		break;

	case CFG_AUTH_TYPE:
		/* One priority slot per nibble, most significant first */
		hba->auth_cfg.authentication_type_priority[0] =
		    (cfg->current & 0xF000) >> 12;
		hba->auth_cfg.authentication_type_priority[1] =
		    (cfg->current & 0x0F00) >> 8;
		hba->auth_cfg.authentication_type_priority[2] =
		    (cfg->current & 0x00F0) >> 4;
		hba->auth_cfg.authentication_type_priority[3] =
		    (cfg->current & 0x000F);
		break;

	case CFG_AUTH_HASH:
		hba->auth_cfg.hash_priority[0] =
		    (cfg->current & 0xF000) >> 12;
		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
		break;

	case CFG_AUTH_GROUP:
		hba->auth_cfg.dh_group_priority[0] =
		    (cfg->current & 0xF0000000) >> 28;
		hba->auth_cfg.dh_group_priority[1] =
		    (cfg->current & 0x0F000000) >> 24;
		hba->auth_cfg.dh_group_priority[2] =
		    (cfg->current & 0x00F00000) >> 20;
		hba->auth_cfg.dh_group_priority[3] =
		    (cfg->current & 0x000F0000) >> 16;
		hba->auth_cfg.dh_group_priority[4] =
		    (cfg->current & 0x0000F000) >> 12;
		hba->auth_cfg.dh_group_priority[5] =
		    (cfg->current & 0x00000F00) >> 8;
		hba->auth_cfg.dh_group_priority[6] =
		    (cfg->current & 0x000000F0) >> 4;
		hba->auth_cfg.dh_group_priority[7] =
		    (cfg->current & 0x0000000F);
		break;

	case CFG_AUTH_INTERVAL:
		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
		break;
#endif /* DHCHAP_SUPPORT */

	}

	return (FC_SUCCESS);

} /* emlxs_set_parm() */


/*
 * emlxs_mem_alloc  OS specific routine for memory allocation / mapping
 *
 * The buf_info->flags field describes the memory operation requested.
 *
 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA
 * Virtual address is supplied in buf_info->virt
 * DMA mapping flag is in buf_info->align
 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
 * The mapped physical address is returned buf_info->phys
 *
 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
 * if FC_MBUF_DMA is set the memory is also mapped for DMA
 * The byte alignment of the memory request is supplied in buf_info->align
 * The byte size of the memory request is supplied in buf_info->size
 * The virtual address is returned buf_info->virt
 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA)
 */
extern uint8_t *
emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
{
	emlxs_port_t *port = &PPORT;
	ddi_dma_attr_t dma_attr;
	ddi_device_acc_attr_t dev_attr;
	uint_t cookie_count;
	size_t dma_reallen;
	ddi_dma_cookie_t dma_cookie;
	uint_t dma_flag;
	int status;

	dma_attr = 
emlxs_dma_attr_1sg; 6912 dev_attr = emlxs_data_acc_attr; 6913 6914 if (buf_info->flags & FC_MBUF_SNGLSG) { 6915 buf_info->flags &= ~FC_MBUF_SNGLSG; 6916 dma_attr.dma_attr_sgllen = 1; 6917 } 6918 6919 if (buf_info->flags & FC_MBUF_DMA32) { 6920 buf_info->flags &= ~FC_MBUF_DMA32; 6921 dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff; 6922 } 6923 6924 buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL); 6925 6926 switch (buf_info->flags) { 6927 case 0: /* allocate host memory */ 6928 6929 buf_info->virt = 6930 (uint32_t *)kmem_zalloc((size_t)buf_info->size, 6931 KM_NOSLEEP); 6932 buf_info->phys = 0; 6933 buf_info->data_handle = 0; 6934 buf_info->dma_handle = 0; 6935 6936 if (buf_info->virt == (uint32_t *)0) { 6937 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 6938 "size=%x align=%x flags=%x", buf_info->size, 6939 buf_info->align, buf_info->flags); 6940 } 6941 break; 6942 6943 case FC_MBUF_PHYSONLY: 6944 case FC_MBUF_DMA | FC_MBUF_PHYSONLY: /* fill in physical address */ 6945 6946 if (buf_info->virt == 0) 6947 break; 6948 6949 /* 6950 * Allocate the DMA handle for this DMA object 6951 */ 6952 status = ddi_dma_alloc_handle((void *)hba->dip, 6953 &dma_attr, DDI_DMA_DONTWAIT, 6954 NULL, (ddi_dma_handle_t *)&buf_info->dma_handle); 6955 if (status != DDI_SUCCESS) { 6956 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 6957 "ddi_dma_alloc_handle failed: size=%x align=%x " 6958 "flags=%x", buf_info->size, buf_info->align, 6959 buf_info->flags); 6960 6961 buf_info->phys = 0; 6962 buf_info->dma_handle = 0; 6963 break; 6964 } 6965 6966 switch (buf_info->align) { 6967 case DMA_READ_WRITE: 6968 dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT); 6969 break; 6970 case DMA_READ_ONLY: 6971 dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT); 6972 break; 6973 case DMA_WRITE_ONLY: 6974 dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT); 6975 break; 6976 } 6977 6978 /* Map this page of memory */ 6979 status = ddi_dma_addr_bind_handle( 6980 (ddi_dma_handle_t)buf_info->dma_handle, 
NULL,
		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
		    &cookie_count);

		/* Only single-cookie mappings are supported here */
		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_addr_bind_handle failed: status=%x "
			    "count=%x flags=%x", status, cookie_count,
			    buf_info->flags);

			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);
			buf_info->phys = 0;
			buf_info->dma_handle = 0;
			break;
		}

		if (hba->bus_type == SBUS_FC) {

			int32_t burstsizes_limit = 0xff;
			int32_t ret_burst;

			ret_burst = ddi_dma_burstsizes(
			    buf_info->dma_handle) & burstsizes_limit;
			if (ddi_dma_set_sbus64(buf_info->dma_handle,
			    ret_burst) == DDI_FAILURE) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mem_alloc_failed_msg,
				    "ddi_dma_set_sbus64 failed.");
			}
		}

		/* Save Physical address */
		buf_info->phys = dma_cookie.dmac_laddress;

		/*
		 * Just to be sure, let's add this
		 */
		emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle,
		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);

		break;

	case FC_MBUF_DMA:	/* allocate and map DMA mem */

		dma_attr.dma_attr_align = buf_info->align;

		/*
		 * Allocate the DMA handle for this DMA object
		 */
		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    (ddi_dma_handle_t *)&buf_info->dma_handle);
		if (status != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_alloc_handle failed: size=%x align=%x "
			    "flags=%x", buf_info->size, buf_info->align,
			    buf_info->flags);

			buf_info->virt = 0;
			buf_info->phys = 0;
			buf_info->data_handle = 0;
			buf_info->dma_handle = 0;
			break;
		}

		status = ddi_dma_mem_alloc(
		    (ddi_dma_handle_t)buf_info->dma_handle,
		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);

		/* dma_reallen may round up; reject only if it is too small */
		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_mem_alloc failed: size=%x align=%x "
			    "flags=%x", buf_info->size, buf_info->align,
			    buf_info->flags);

			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);

			buf_info->virt = 0;
			buf_info->phys = 0;
			buf_info->data_handle = 0;
			buf_info->dma_handle = 0;
			break;
		}

		/* Map this page of memory */
		status = ddi_dma_addr_bind_handle(
		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &dma_cookie, &cookie_count);

		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_addr_bind_handle failed: status=%x "
			    "count=%d size=%x align=%x flags=%x", status,
			    cookie_count, buf_info->size, buf_info->align,
			    buf_info->flags);

			(void) ddi_dma_mem_free(
			    (ddi_acc_handle_t *)&buf_info->data_handle);
			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);

			buf_info->virt = 0;
			buf_info->phys = 0;
			buf_info->dma_handle = 0;
			buf_info->data_handle = 0;
			break;
		}

		if (hba->bus_type == SBUS_FC) {
			int32_t burstsizes_limit = 0xff;
			int32_t ret_burst;

			ret_burst =
			    ddi_dma_burstsizes(buf_info->
			    dma_handle) & burstsizes_limit;
			if (ddi_dma_set_sbus64(buf_info->dma_handle,
			    ret_burst) == DDI_FAILURE) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mem_alloc_failed_msg,
				    "ddi_dma_set_sbus64 failed.");
			}
		}

		/* Save Physical address */
		buf_info->phys = dma_cookie.dmac_laddress;

		/* Just to be sure, let's add this */
		emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle,
		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);

		break;
	} /* End of switch */

	/* NULL on any failure path above (virt is zeroed on error) */
	return ((uint8_t *)buf_info->virt);

} /* emlxs_mem_alloc() */



/*
 * emlxs_mem_free:
 *
 * OS specific routine for memory de-allocation / unmapping
 *
 * The buf_info->flags field describes the memory operation requested.
 *
 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped
 * for DMA, but not freed. The mapped physical address to be unmapped is in
 * buf_info->phys
 *
 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
 * buf_info->phys. The virtual address to be freed is in buf_info->virt
 */
/*ARGSUSED*/
extern void
emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
{
	buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL);

	switch (buf_info->flags) {
	case 0:	/* free host memory */

		if (buf_info->virt) {
			kmem_free(buf_info->virt, (size_t)buf_info->size);
			buf_info->virt = NULL;
		}

		break;

	case FC_MBUF_PHYSONLY:
	case FC_MBUF_DMA | FC_MBUF_PHYSONLY:	/* nothing to do */

		/* Unbind and release the handle; caller owns the memory */
		if (buf_info->dma_handle) {
			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);
			buf_info->dma_handle = NULL;
		}

		break;

	case FC_MBUF_DMA:	/* unmap free DMA-able memory */


		if (buf_info->dma_handle) {
			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
			(void) ddi_dma_mem_free(
			    (ddi_acc_handle_t *)&buf_info->data_handle);
			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t
*)&buf_info->dma_handle); 7178 buf_info->dma_handle = NULL; 7179 buf_info->data_handle = NULL; 7180 } 7181 7182 break; 7183 } 7184 7185 } /* emlxs_mem_free() */ 7186 7187 7188 static int32_t 7189 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp) 7190 { 7191 emlxs_hba_t *hba = HBA; 7192 fc_packet_t *pkt; 7193 IOCBQ *iocbq; 7194 IOCB *iocb; 7195 RING *rp; 7196 NODELIST *ndlp; 7197 char *cmd; 7198 uint16_t lun; 7199 FCP_CMND *fcp_cmd; 7200 uint32_t did; 7201 7202 pkt = PRIV2PKT(sbp); 7203 fcp_cmd = (FCP_CMND *)pkt->pkt_cmd; 7204 rp = &hba->ring[FC_FCP_RING]; 7205 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 7206 7207 /* Find target node object */ 7208 ndlp = emlxs_node_find_did(port, did); 7209 7210 if (!ndlp || !ndlp->nlp_active) { 7211 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7212 "Node not found. did=%x", did); 7213 7214 return (FC_BADPACKET); 7215 } 7216 7217 /* If gate is closed */ 7218 if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) { 7219 return (FC_TRAN_BUSY); 7220 } 7221 7222 #ifdef SAN_DIAG_SUPPORT 7223 sbp->sd_start_time = gethrtime(); 7224 #endif /* SAN_DIAG_SUPPORT */ 7225 7226 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 7227 emlxs_swap_fcp_pkt(sbp); 7228 #endif /* EMLXS_MODREV2X */ 7229 7230 if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) { 7231 fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE; 7232 } 7233 7234 iocbq = &sbp->iocbq; 7235 iocb = &iocbq->iocb; 7236 7237 iocbq->node = (void *)ndlp; 7238 if (emlxs_sli_prep_fcp_iocb(port, sbp) != FC_SUCCESS) { 7239 return (FC_TRAN_BUSY); 7240 } 7241 7242 /* Snoop for target or lun resets */ 7243 cmd = (char *)pkt->pkt_cmd; 7244 lun = *((uint16_t *)cmd); 7245 lun = SWAP_DATA16(lun); 7246 7247 /* Check for target reset */ 7248 if (cmd[10] & 0x20) { 7249 mutex_enter(&sbp->mtx); 7250 sbp->pkt_flags |= PACKET_FCP_TGT_RESET; 7251 sbp->pkt_flags |= PACKET_POLLED; 7252 mutex_exit(&sbp->mtx); 7253 7254 #ifdef SAN_DIAG_SUPPORT 7255 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET, 7256 (HBA_WWN 
*)&ndlp->nlp_portname, -1); 7257 #endif 7258 7259 iocbq->flag |= IOCB_PRIORITY; 7260 7261 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7262 "Target Reset: did=%x", did); 7263 7264 /* Close the node for any further normal IO */ 7265 emlxs_node_close(port, ndlp, FC_FCP_RING, pkt->pkt_timeout); 7266 7267 /* Flush the IO's on the tx queues */ 7268 (void) emlxs_tx_node_flush(port, ndlp, rp, 0, sbp); 7269 } 7270 7271 /* Check for lun reset */ 7272 else if (cmd[10] & 0x10) { 7273 mutex_enter(&sbp->mtx); 7274 sbp->pkt_flags |= PACKET_FCP_LUN_RESET; 7275 sbp->pkt_flags |= PACKET_POLLED; 7276 mutex_exit(&sbp->mtx); 7277 7278 #ifdef SAN_DIAG_SUPPORT 7279 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET, 7280 (HBA_WWN *)&ndlp->nlp_portname, lun); 7281 #endif 7282 7283 iocbq->flag |= IOCB_PRIORITY; 7284 7285 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7286 "LUN Reset: did=%x LUN=%02x02x", did, cmd[0], cmd[1]); 7287 7288 /* Flush the IO's on the tx queues for this lun */ 7289 (void) emlxs_tx_lun_flush(port, ndlp, lun, sbp); 7290 } 7291 7292 /* Initalize sbp */ 7293 mutex_enter(&sbp->mtx); 7294 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7295 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 7296 sbp->node = (void *)ndlp; 7297 sbp->lun = lun; 7298 sbp->class = iocb->ulpClass; 7299 sbp->did = ndlp->nlp_DID; 7300 mutex_exit(&sbp->mtx); 7301 7302 if (pkt->pkt_cmdlen) { 7303 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7304 DDI_DMA_SYNC_FORDEV); 7305 } 7306 7307 if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) { 7308 emlxs_mpdata_sync(pkt->pkt_data_dma, 0, pkt->pkt_datalen, 7309 DDI_DMA_SYNC_FORDEV); 7310 } 7311 7312 HBASTATS.FcpIssued++; 7313 7314 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_FCP_RING], iocbq); 7315 7316 return (FC_SUCCESS); 7317 7318 } /* emlxs_send_fcp_cmd() */ 7319 7320 7321 #ifdef SFCT_SUPPORT 7322 static int32_t 7323 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp) 7324 { 7325 emlxs_hba_t *hba = HBA; 7326 fc_packet_t *pkt; 7327 IOCBQ *iocbq; 7328 IOCB *iocb; 7329 NODELIST *ndlp; 7330 uint16_t iotag; 7331 uint32_t did; 7332 ddi_dma_cookie_t *cp_cmd; 7333 7334 pkt = PRIV2PKT(sbp); 7335 7336 did = sbp->did; 7337 ndlp = sbp->node; 7338 7339 iocbq = &sbp->iocbq; 7340 iocb = &iocbq->iocb; 7341 7342 /* Make sure node is still active */ 7343 if (!ndlp->nlp_active) { 7344 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7345 "*Node not found. did=%x", did); 7346 7347 return (FC_BADPACKET); 7348 } 7349 7350 /* If gate is closed */ 7351 if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) { 7352 return (FC_TRAN_BUSY); 7353 } 7354 7355 /* Get the iotag by registering the packet */ 7356 iotag = emlxs_register_pkt(sbp->ring, sbp); 7357 7358 if (!iotag) { 7359 /* No more command slots available, retry later */ 7360 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7361 "*Adapter Busy. 
Unable to allocate iotag: did=0x%x", did); 7362 7363 return (FC_TRAN_BUSY); 7364 } 7365 7366 /* Point of no return */ 7367 7368 #if (EMLXS_MODREV >= EMLXS_MODREV3) 7369 cp_cmd = pkt->pkt_cmd_cookie; 7370 #else 7371 cp_cmd = &pkt->pkt_cmd_cookie; 7372 #endif /* >= EMLXS_MODREV3 */ 7373 7374 iocb->un.fcpt64.bdl.addrHigh = putPaddrHigh(cp_cmd->dmac_laddress); 7375 iocb->un.fcpt64.bdl.addrLow = putPaddrLow(cp_cmd->dmac_laddress); 7376 iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen; 7377 iocb->un.fcpt64.bdl.bdeFlags = 0; 7378 7379 if (hba->sli_mode < 3) { 7380 iocb->ulpBdeCount = 1; 7381 iocb->ulpLe = 1; 7382 } else { /* SLI3 */ 7383 7384 iocb->ulpBdeCount = 0; 7385 iocb->ulpLe = 0; 7386 iocb->unsli3.ext_iocb.ebde_count = 0; 7387 } 7388 7389 /* Initalize iocbq */ 7390 iocbq->port = (void *)port; 7391 iocbq->node = (void *)ndlp; 7392 iocbq->ring = (void *)sbp->ring; 7393 7394 /* Initalize iocb */ 7395 iocb->ulpContext = (uint16_t)pkt->pkt_cmd_fhdr.rx_id; 7396 iocb->ulpIoTag = iotag; 7397 iocb->ulpRsvdByte = 7398 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout); 7399 iocb->ulpOwner = OWN_CHIP; 7400 iocb->ulpClass = sbp->class; 7401 iocb->ulpCommand = CMD_FCP_TRSP64_CX; 7402 7403 /* Set the pkt timer */ 7404 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7405 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 7406 7407 if (pkt->pkt_cmdlen) { 7408 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7409 DDI_DMA_SYNC_FORDEV); 7410 } 7411 7412 HBASTATS.FcpIssued++; 7413 7414 emlxs_sli_issue_iocb_cmd(hba, sbp->ring, iocbq); 7415 7416 return (FC_SUCCESS); 7417 7418 } /* emlxs_send_fct_status() */ 7419 7420 7421 static int32_t 7422 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp) 7423 { 7424 emlxs_hba_t *hba = HBA; 7425 fc_packet_t *pkt; 7426 IOCBQ *iocbq; 7427 IOCB *iocb; 7428 NODELIST *ndlp; 7429 uint16_t iotag; 7430 uint32_t did; 7431 7432 pkt = PRIV2PKT(sbp); 7433 7434 did = sbp->did; 7435 ndlp = sbp->node; 7436 7437 7438 iocbq = &sbp->iocbq; 7439 iocb = &iocbq->iocb; 7440 7441 /* Make sure node is still active */ 7442 if ((ndlp == NULL) || (!ndlp->nlp_active)) { 7443 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7444 "*Node not found. did=%x", did); 7445 7446 return (FC_BADPACKET); 7447 } 7448 7449 /* If gate is closed */ 7450 if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) { 7451 return (FC_TRAN_BUSY); 7452 } 7453 7454 /* Get the iotag by registering the packet */ 7455 iotag = emlxs_register_pkt(sbp->ring, sbp); 7456 7457 if (!iotag) { 7458 /* No more command slots available, retry later */ 7459 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7460 "*Adapter Busy. Unable to allocate iotag: did=0x%x", did); 7461 7462 return (FC_TRAN_BUSY); 7463 } 7464 7465 /* Point of no return */ 7466 iocbq->port = (void *)port; 7467 iocbq->node = (void *)ndlp; 7468 iocbq->ring = (void *)sbp->ring; 7469 /* 7470 * Don't give the abort priority, we want the IOCB 7471 * we are aborting to be processed first. 
	 */
	iocbq->flag |= IOCB_SPECIAL;

	iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id;
	iocb->ulpIoTag = iotag;
	iocb->ulpLe = 1;
	iocb->ulpClass = sbp->class;
	iocb->ulpOwner = OWN_CHIP;

	if (hba->state >= FC_LINK_UP) {
		/* Create the abort IOCB */
		iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
		iocb->ulpCommand = CMD_ABORT_XRI_CX;

	} else {
		/* Create the close IOCB */
		iocb->ulpCommand = CMD_CLOSE_XRI_CX;

	}

	iocb->ulpRsvdByte =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	/* Set the pkt timer */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

	emlxs_sli_issue_iocb_cmd(hba, sbp->ring, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_fct_abort() */

#endif /* SFCT_SUPPORT */


/*
 * Build and issue an IP-over-FC IOCB for the packet in sbp.
 * Requires an existing exchange (XRI) unless the destination is broadcast;
 * if none exists, one is requested and the caller is told to retry.
 * Returns FC_SUCCESS, FC_TRAN_BUSY or FC_BADPACKET.
 */
static int32_t
emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	RING *rp;
	uint32_t i;
	NODELIST *ndlp;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	rp = &hba->ring[FC_IP_RING];
	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);

	/* Check if node exists */
	/* Broadcast did is always a success */
	ndlp = emlxs_node_find_did(port, did);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

	/* Check if gate is temporarily closed */
	if (ndlp->nlp_flag[FC_IP_RING] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	/* Check if an exchange has been created */
	if ((ndlp->nlp_Xri == 0) && (did != Bcast_DID)) {
		/* No exchange. Try creating one */
		(void) emlxs_create_xri(port, rp, ndlp);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Exchange not found. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
	/* on BROADCAST commands */
	if (pkt->pkt_cmdlen == 0) {
		/* Set the pkt_cmdlen to the cookie size */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
		}
#else
		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
#endif	/* >= EMLXS_MODREV3 */

	}

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	iocbq->node = (void *)ndlp;
	if (emlxs_sli_prep_ip_iocb(port, sbp) != FC_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ulpClass;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	if (pkt->pkt_cmdlen) {
		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_IP_RING], iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ip() */


/*
 * Build and issue an ELS command IOCB for the packet in sbp.
 * Handles several special cases: FLOGI->FDISC conversion for NPIV vports,
 * point-to-point PLOGI fix-ups, self-login rejection, fresh service
 * parameter injection, and node close/flush on PLOGI/PRLI.
 * Returns FC_SUCCESS, FC_TRAN_BUSY, FC_FAILURE or FC_BADPACKET.
 */
static int32_t
emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_port_t *vport;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t cmd;
	int i;
	ELS_PKT *els_pkt;
	NODELIST *ndlp;
	uint32_t did;
	char fcsp_msg[32];

	fcsp_msg[0] = 0;
	pkt = PRIV2PKT(sbp);
	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_els_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Point of no return, except for ADISC & PLOGI */

	/* Check node */
	switch (cmd) {
	case ELS_CMD_FLOGI:
		/* A virtual port must fabric-login with FDISC, not FLOGI */
		if (port->vpi > 0) {
			cmd = ELS_CMD_FDISC;
			*((uint32_t *)pkt->pkt_cmd) = cmd;
		}
		ndlp = NULL;

		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
		}

		/* We will process these cmds at the bottom of this routine */
		break;

	case ELS_CMD_PLOGI:
		/* Make sure we don't log into ourself */
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);

			if (!(vport->flag & EMLXS_PORT_BOUND)) {
				continue;
			}

			if (did == vport->did) {
				pkt->pkt_state = FC_PKT_NPORT_RJT;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
				emlxs_unswap_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

				return (FC_FAILURE);
			}
		}

		ndlp = NULL;

		/* Check if this is the first PLOGI */
		/* after a PT_TO_PT connection */
		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
			MAILBOXQ *mbox;

			/* ULP bug fix */
			if (pkt->pkt_cmd_fhdr.s_id == 0) {
				pkt->pkt_cmd_fhdr.s_id =
				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
				    FP_DEFAULT_SID;
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
				    pkt->pkt_cmd_fhdr.s_id,
				    pkt->pkt_cmd_fhdr.d_id);
			}

			mutex_enter(&EMLXS_PORT_LOCK);
			port->did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.s_id);
			mutex_exit(&EMLXS_PORT_LOCK);

			/* Update our service parms */
			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
			    MEM_MBOX | MEM_PRI))) {
				emlxs_mb_config_link(hba, (MAILBOX *) mbox);

				/* On MBX_BUSY the mailbox was queued and */
				/* will be freed on completion */
				if (emlxs_sli_issue_mbox_cmd(hba,
				    (MAILBOX *)mbox, MBX_NOWAIT, 0)
				    != MBX_BUSY) {
					(void) emlxs_mem_put(hba, MEM_MBOX,
					    (uint8_t *)mbox);
				}

			}
		}

		/* We will process these cmds at the bottom of this routine */
		break;

	default:
		ndlp = emlxs_node_find_did(port, did);

		/* If an ADISC is being sent and we have no node, */
		/* then we must fail the ADISC now */
		if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) {

			/* Build the LS_RJT response */
			els_pkt = (ELS_PKT *)pkt->pkt_resp;
			els_pkt->elsCode = 0x01;
			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
			    LSRJT_LOGICAL_ERR;
			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
			    LSEXP_NOTHING_MORE;
			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "ADISC Rejected. Node not found. did=0x%x", did);

			/* Return this as rejected by the target */
			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);

			return (FC_SUCCESS);
		}
	}

	/* DID == Bcast_DID is special case to indicate that */
	/* RPI is being passed in seq_id field */
	/* This is used by emlxs_send_logo() for target mode */

	/* Initalize iocbq */
	iocbq->node = (void *)ndlp;
	if (emlxs_sli_prep_els_iocb(port, sbp) != FC_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	/* Check cmd */
	switch (cmd) {
	case ELS_CMD_PRLI:
		{
		/*
		 * if our firmware version is 3.20 or later,
		 * set the following bits for FC-TAPE support.
		 */

		if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
			els_pkt->un.prli.ConfmComplAllowed = 1;
			els_pkt->un.prli.Retry = 1;
			els_pkt->un.prli.TaskRetryIdReq = 1;
		} else {
			els_pkt->un.prli.ConfmComplAllowed = 0;
			els_pkt->un.prli.Retry = 0;
			els_pkt->un.prli.TaskRetryIdReq = 0;
		}

		break;
		}

		/* This is a patch for the ULP stack. */

		/*
		 * ULP only reads our service paramters once during bind_port,
		 * but the service parameters change due to topology.
		 */
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PLOGI:
	case ELS_CMD_PDISC:
		{
		/* Copy latest service parameters to payload */
		bcopy((void *) &port->sparam,
		    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));

#ifdef NPIV_SUPPORT
		/* Advertise the vendor-version (NPIV) fields on PLOGI */
		if ((hba->flag & FC_NPIV_ENABLED) &&
		    (hba->flag & FC_NPIV_SUPPORTED) &&
		    (cmd == ELS_CMD_PLOGI)) {
			SERV_PARM *sp;
			emlxs_vvl_fmt_t *vvl;

			sp = (SERV_PARM *)&els_pkt->un.logi;
			sp->valid_vendor_version = 1;
			vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
			vvl->un0.w0.oui = 0x0000C9;
			vvl->un0.word0 = SWAP_DATA32(vvl->un0.word0);
			vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
			vvl->un1.word1 = SWAP_DATA32(vvl->un1.word1);
		}
#endif	/* NPIV_SUPPORT */

#ifdef DHCHAP_SUPPORT
		emlxs_dhc_init_sp(port, did,
		    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
#endif	/* DHCHAP_SUPPORT */

		break;
		}

	}

	/* Initialize the sbp */
	mutex_enter(&sbp->mtx);
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ulpClass;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);

	if (pkt->pkt_cmdlen) {
		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	/* Check node */
	switch (cmd) {
	case ELS_CMD_FLOGI:
		if (port->ini_mode) {
			/* Make sure fabric node is destroyed */
			/* It should already have been destroyed at link down */
			/* Unregister the fabric did and attempt a deferred */
			/* iocb send */
			if (emlxs_mb_unreg_did(port, Fabric_DID, NULL, NULL,
			    iocbq) == 0) {
				/* Deferring iocb tx until */
				/* completion of unreg */
				return (FC_SUCCESS);
			}
		}
		break;

	case ELS_CMD_PLOGI:

		ndlp = emlxs_node_find_did(port, did);

		if (ndlp && ndlp->nlp_active) {
			/* Close the node for any further normal IO */
			emlxs_node_close(port, ndlp, FC_FCP_RING,
			    pkt->pkt_timeout + 10);
			emlxs_node_close(port, ndlp, FC_IP_RING,
			    pkt->pkt_timeout + 10);

			/* Flush tx queues */
			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

			/* Flush chip queues */
			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
		}

		break;

	case ELS_CMD_PRLI:

		ndlp = emlxs_node_find_did(port, did);

		if (ndlp && ndlp->nlp_active) {
			/* Close the node for any further FCP IO */
			emlxs_node_close(port, ndlp, FC_FCP_RING,
			    pkt->pkt_timeout + 10);

			/* Flush tx queues */
			(void) emlxs_tx_node_flush(port, ndlp,
			    &hba->ring[FC_FCP_RING], 0, 0);

			/* Flush chip queues */
			(void) emlxs_chipq_node_flush(port,
			    &hba->ring[FC_FCP_RING], ndlp, 0);
		}

		break;

	}

	HBASTATS.ElsCmdIssued++;

	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_els() */




/*
 * Build and issue an ELS response IOCB for the packet in sbp.
 * Matches the reply to its unsolicited command (via ox_id / ub token),
 * intercepts replies to driver-generated commands (ULP patches), then
 * patches payloads and closes/flushes nodes as the reply type requires.
 * Returns FC_SUCCESS, FC_TRAN_BUSY or FC_BADPACKET.
 */
static int32_t
emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	int i;
	uint32_t cmd;
	uint32_t ucmd;
	ELS_PKT *els_pkt;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t did;
	char fcsp_msg[32];
	uint8_t *ub_buffer;

	fcsp_msg[0] = 0;
	pkt = PRIV2PKT(sbp);
	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Acquire the unsolicited command this pkt is replying to */
	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
		/* This is for auto replies when no ub's are used */
		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
		ubp = NULL;
		ub_priv = NULL;
		ub_buffer = NULL;

#ifdef SFCT_SUPPORT
		if (sbp->fct_cmd) {
			fct_els_t *els =
			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
			ub_buffer = (uint8_t *)els->els_req_payload;
		}
#endif	/* SFCT_SUPPORT */

	} else {
		/* Find the ub buffer that goes with this reply */
		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
			    "ELS reply: Invalid oxid=%x",
			    pkt->pkt_cmd_fhdr.ox_id);
			return (FC_BADPACKET);
		}

		ub_buffer = (uint8_t *)ubp->ub_buffer;
		ub_priv = ubp->ub_fca_private;
		ucmd = ub_priv->cmd;

		ub_priv->flags |= EMLXS_UB_REPLY;

		/* Reset oxid to ELS command */
		/* We do this because the ub is only valid */
		/* until we return from this thread */
		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
	}

	/* Save the result */
	sbp->ucmd = ucmd;

	/* Check for interceptions */
	switch (ucmd) {

#ifdef ULP_PATCH2
	case ELS_CMD_LOGO:
		{
		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {

			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
		}
#endif

#ifdef ULP_PATCH3
	case ELS_CMD_PRLI:
		{
		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {

			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
		}
#endif


#ifdef ULP_PATCH4
	case ELS_CMD_PRLO:
		{
		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
		}
#endif

#ifdef ULP_PATCH6
	case ELS_CMD_RSCN:
		{
		/* Check if this RSCN was generated by us */
		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = SWAP_DATA32(cmd);
			cmd &= ELS_CMD_MASK;

			/*
			 * If ULP is accepting this,
			 * then close affected node
			 */
			if (port->ini_mode && ub_buffer && cmd
			    == ELS_CMD_ACC) {
				fc_rscn_t *rscn;
				uint32_t count;
				uint32_t *lp;

				/*
				 * Only the Leadville code path will
				 * come thru here. The RSCN data is NOT
				 * swapped properly for the Comstar code
				 * path.
				 */
				lp = (uint32_t *)ub_buffer;
				rscn = (fc_rscn_t *)lp++;
				count =
				    ((rscn->rscn_payload_len - 4) / 4);

				/* Close affected ports */
				for (i = 0; i < count; i++, lp++) {
					(void) emlxs_port_offline(port,
					    *lp);
				}
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
			    "RSCN %s: did=%x oxid=%x rxid=%x. "
			    "Intercepted.", emlxs_elscmd_xlate(cmd),
			    did, pkt->pkt_cmd_fhdr.ox_id,
			    pkt->pkt_cmd_fhdr.rx_id);

			/*
			 * Since we generated this RSCN,
			 * we won't need to send this reply
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
		}
#endif

	case ELS_CMD_PLOGI:
		{
		/* Check if this PLOGI was generated by us */
		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = SWAP_DATA32(cmd);
			cmd &= ELS_CMD_MASK;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
			    "Intercepted.", emlxs_elscmd_xlate(cmd),
			    did, pkt->pkt_cmd_fhdr.ox_id,
			    pkt->pkt_cmd_fhdr.rx_id);

			/*
			 * Since we generated this PLOGI,
			 * we won't need to send this reply
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
		}

	}

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_els_pkt(sbp);
#endif	/* EMLXS_MODREV2X */


	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Check if modifications are needed */
	switch (ucmd) {
	case (ELS_CMD_PRLI):

		if (cmd == ELS_CMD_ACC) {
			/* This is a patch for the ULP stack. */
			/* ULP does not keep track of FCP2 support */

			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
				els_pkt->un.prli.ConfmComplAllowed = 1;
				els_pkt->un.prli.Retry = 1;
				els_pkt->un.prli.TaskRetryIdReq = 1;
			} else {
				els_pkt->un.prli.ConfmComplAllowed = 0;
				els_pkt->un.prli.Retry = 0;
				els_pkt->un.prli.TaskRetryIdReq = 0;
			}
		}

		break;

	case ELS_CMD_FLOGI:
	case ELS_CMD_PLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PDISC:

		if (cmd == ELS_CMD_ACC) {
			/* This is a patch for the ULP stack. */

			/*
			 * ULP only reads our service parameters
			 * once during bind_port, but the service
			 * parameters change due to topology.
			 */

			/* Copy latest service parameters to payload */
			bcopy((void *)&port->sparam,
			    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));

#ifdef DHCHAP_SUPPORT
			emlxs_dhc_init_sp(port, did,
			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
#endif	/* DHCHAP_SUPPORT */

		}

		break;

	}

	/* Initalize iocbq */
	iocbq->node = (void *)NULL;
	if (emlxs_sli_prep_els_iocb(port, sbp) != FC_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *) NULL;
	sbp->lun = 0;
	sbp->class = iocb->ulpClass;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);

	/* Process nodes */
	switch (ucmd) {
	case ELS_CMD_RSCN:
		{
		if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
			fc_rscn_t *rscn;
			uint32_t count;
			uint32_t *lp = NULL;

			/*
			 * Only the Leadville code path will come thru
			 * here. The RSCN data is NOT swapped properly
			 * for the Comstar code path.
			 */
			lp = (uint32_t *)ub_buffer;
			rscn = (fc_rscn_t *)lp++;
			count = ((rscn->rscn_payload_len - 4) / 4);

			/* Close affected ports */
			for (i = 0; i < count; i++, lp++) {
				(void) emlxs_port_offline(port, *lp);
			}
		}
		break;
		}
	case ELS_CMD_PLOGI:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, FC_FCP_RING,
				    pkt->pkt_timeout + 10);
				emlxs_node_close(port, ndlp, FC_IP_RING,
				    pkt->pkt_timeout + 10);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
			}
		}

		break;

	case ELS_CMD_PRLI:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, FC_FCP_RING,
				    pkt->pkt_timeout + 10);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp,
				    &hba->ring[FC_FCP_RING], 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port,
				    &hba->ring[FC_FCP_RING], ndlp, 0);
			}
		}

		break;

	case ELS_CMD_PRLO:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp,
				    &hba->ring[FC_FCP_RING], 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port,
				    &hba->ring[FC_FCP_RING], ndlp, 0);
			}
		}

		break;

	case ELS_CMD_LOGO:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);
				emlxs_node_close(port, ndlp, FC_IP_RING, 60);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
			}
		}

		break;
	}

	if (pkt->pkt_cmdlen) {
		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.ElsRspIssued++;

	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_els_rsp() */


#ifdef MENLO_SUPPORT
/*
 * Build and issue a Menlo management CT IOCB for the packet in sbp.
 * FC_PKT_EXCHANGE carries a command; FC_PKT_OUTBOUND carries the data
 * phase of a firmware download. Returns FC_SUCCESS, FC_TRAN_BUSY or
 * FC_BADPACKET.
 */
static int32_t
emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	uint32_t did;
	uint32_t *lp;

	pkt = PRIV2PKT(sbp);
	did = EMLXS_MENLO_DID;
	lp = (uint32_t *)pkt->pkt_cmd;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	ndlp = emlxs_node_find_did(port, did);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

	iocbq->node = (void *) ndlp;
	if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
		/* Cmd phase */

		/* Initalize iocb */
		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
		iocb->ulpContext = 0;
		iocb->ulpPU = 3;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: [%08x,%08x,%08x,%08x]",
		    emlxs_menlo_cmd_xlate(SWAP_LONG(lp[0])), SWAP_LONG(lp[1]),
		    SWAP_LONG(lp[2]), SWAP_LONG(lp[3]), SWAP_LONG(lp[4]));

	} else {	/* FC_PKT_OUTBOUND */

		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
		iocb->ulpCommand = CMD_GEN_REQUEST64_CX;

		/* Initalize iocb */
		iocb->un.genreq64.param = 0;
		iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id;
		iocb->ulpPU = 1;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: Data: rxid=0x%x size=%d",
		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
	}

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *) ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ulpClass;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
	    DDI_DMA_SYNC_FORDEV);

	HBASTATS.CtCmdIssued++;

	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_menlo() */
#endif /* MENLO_SUPPORT */


/*
 * Build and issue a CT command IOCB for the packet in sbp.
 * Logs the command through the translator appropriate to the
 * destination (name server, FDMI management server, or other).
 * Returns FC_SUCCESS, FC_TRAN_BUSY or FC_BADPACKET.
 */
static int32_t
emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	ndlp = emlxs_node_find_did(port, did);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_ct_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	iocbq->node = (void *)ndlp;
	if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ulpClass;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	if (did == NameServer_DID) {
		SLI_CT_REQUEST *CtCmd;
		uint32_t *lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_ctcmd_xlate(
		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));

		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
		}

	} else if (did == FDMI_DID) {
		SLI_CT_REQUEST *CtCmd;
		uint32_t *lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_mscmd_xlate(
		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
	} else {
		SLI_CT_REQUEST *CtCmd;
		uint32_t *lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_rmcmd_xlate(
		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
	}

	if (pkt->pkt_cmdlen) {
		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.CtCmdIssued++;

	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ct() */


/*
 * Build and issue a CT response IOCB for the packet in sbp.
 * Returns FC_SUCCESS or FC_TRAN_BUSY.
 */
static int32_t
emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t did;
	uint32_t *cmd;
	SLI_CT_REQUEST *CtCmd;

	pkt = PRIV2PKT(sbp);
did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 8504 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 8505 cmd = (uint32_t *)pkt->pkt_cmd; 8506 8507 iocbq = &sbp->iocbq; 8508 iocb = &iocbq->iocb; 8509 8510 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8511 emlxs_swap_ct_pkt(sbp); 8512 #endif /* EMLXS_MODREV2X */ 8513 8514 iocbq->node = (void *)NULL; 8515 if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) { 8516 return (FC_TRAN_BUSY); 8517 } 8518 8519 /* Initalize sbp */ 8520 mutex_enter(&sbp->mtx); 8521 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8522 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 8523 sbp->node = NULL; 8524 sbp->lun = 0; 8525 sbp->class = iocb->ulpClass; 8526 sbp->did = did; 8527 mutex_exit(&sbp->mtx); 8528 8529 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg, 8530 "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ", 8531 emlxs_rmcmd_xlate(SWAP_DATA16( 8532 CtCmd->CommandResponse.bits.CmdRsp)), 8533 CtCmd->ReasonCode, CtCmd->Explanation, 8534 SWAP_DATA32(cmd[4]), SWAP_DATA32(cmd[5]), 8535 pkt->pkt_cmd_fhdr.rx_id); 8536 8537 if (pkt->pkt_cmdlen) { 8538 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8539 DDI_DMA_SYNC_FORDEV); 8540 } 8541 8542 HBASTATS.CtRspIssued++; 8543 8544 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq); 8545 8546 return (FC_SUCCESS); 8547 8548 } /* emlxs_send_ct_rsp() */ 8549 8550 8551 /* 8552 * emlxs_get_instance() 8553 * Given a ddi ddiinst, return a Fibre Channel (emlx) ddiinst. 8554 */ 8555 extern uint32_t 8556 emlxs_get_instance(int32_t ddiinst) 8557 { 8558 uint32_t i; 8559 uint32_t inst; 8560 8561 mutex_enter(&emlxs_device.lock); 8562 8563 inst = MAX_FC_BRDS; 8564 for (i = 0; i < emlxs_instance_count; i++) { 8565 if (emlxs_instance[i] == ddiinst) { 8566 inst = i; 8567 break; 8568 } 8569 } 8570 8571 mutex_exit(&emlxs_device.lock); 8572 8573 return (inst); 8574 8575 } /* emlxs_get_instance() */ 8576 8577 8578 /* 8579 * emlxs_add_instance() 8580 * Given a ddi ddiinst, create a Fibre Channel (emlx) ddiinst. 
 * emlx ddiinsts are the order that emlxs_attach gets called, starting at 0.
 */
static uint32_t
emlxs_add_instance(int32_t ddiinst)
{
	uint32_t i;

	mutex_enter(&emlxs_device.lock);

	/* First see if the ddiinst already exists */
	for (i = 0; i < emlxs_instance_count; i++) {
		if (emlxs_instance[i] == ddiinst) {
			break;
		}
	}

	/* If it doesn't already exist, add it */
	if (i >= emlxs_instance_count) {
		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
			emlxs_instance[i] = ddiinst;
			emlxs_instance_count++;
			emlxs_device.hba_count = emlxs_instance_count;
		}
	}

	mutex_exit(&emlxs_device.lock);

	/* NOTE(review): if the table is full (i == MAX_FC_BRDS) the */
	/* out-of-range index is returned to the caller unflagged */
	return (i);

} /* emlxs_add_instance() */


/*
 * emlxs_pkt_complete()
 *
 * Central I/O completion entry point.  Validates that the packet is not
 * already completed/returned/queued, records the completion state, detaches
 * the packet from any parent flush packet, and then dispatches it by type:
 * polled packets wake the poller, driver-internal packets complete inline,
 * and everything else is appended to the per-HBA done queue for callback
 * in the iodone server thread.
 */
/*ARGSUSED*/
extern void
emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
    uint32_t doneq)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_buf_t *fpkt;

	port = sbp->port;

	if (!port) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);

		return;
	}

	hba = HBA;

	mutex_enter(&sbp->mtx);

	/* Check for error conditions */
	if (sbp->pkt_flags & (PACKET_RETURNED | PACKET_COMPLETED |
	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
		if (sbp->pkt_flags & PACKET_RETURNED) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already returned. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_COMPLETED) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already completed. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already in completion. sbp=%p flags=%x",
			    sbp, sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet still on chip queue. sbp=%p flags=%x",
			    sbp, sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		mutex_exit(&sbp->mtx);
		return;
	}

	/* Packet is now in completion */
	sbp->pkt_flags |= PACKET_IN_COMPLETION;

	/* Set the state if not already set */
	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
	}

	/* Check for parent flush packet */
	/* If pkt has a parent flush packet then adjust its count now */
	fpkt = sbp->fpkt;
	if (fpkt) {
		/*
		 * We will try to NULL sbp->fpkt inside the
		 * fpkt's mutex if possible
		 */

		if (!(fpkt->pkt_flags & PACKET_RETURNED)) {
			mutex_enter(&fpkt->mtx);
			if (fpkt->flush_count) {
				fpkt->flush_count--;
			}
			sbp->fpkt = NULL;
			mutex_exit(&fpkt->mtx);
		} else {	/* fpkt has been returned already */

			sbp->fpkt = NULL;
		}
	}

	/* If pkt is polled, then wake up sleeping thread */
	if (sbp->pkt_flags & PACKET_POLLED) {
		/* Don't set the PACKET_RETURNED flag here */
		/* because the polling thread will do it */
		sbp->pkt_flags |= PACKET_COMPLETED;
		mutex_exit(&sbp->mtx);

		/* Wake up sleeping thread */
		mutex_enter(&EMLXS_PKT_LOCK);
		cv_broadcast(&EMLXS_PKT_CV);
		mutex_exit(&EMLXS_PKT_LOCK);
	}

	/* If packet was generated by our driver, */
	/* then complete it immediately */
	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);

		emlxs_iodone(sbp);
	}

	/* Put the pkt on the done queue for callback */
	/* completion in another thread */
	else {
		sbp->pkt_flags |= PACKET_IN_DONEQ;
		sbp->next = NULL;
		mutex_exit(&sbp->mtx);

		/* Put pkt on doneq, so I/O's will be completed in order */
		mutex_enter(&EMLXS_PORT_LOCK);
		if (hba->iodone_tail == NULL) {
			hba->iodone_list = sbp;
			hba->iodone_count = 1;
		} else {
			hba->iodone_tail->next = sbp;
			hba->iodone_count++;
		}
		hba->iodone_tail = sbp;
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Trigger a thread to service the doneq */
		emlxs_thread_trigger1(&hba->iodone_thread,
		    emlxs_iodone_server);
	}

	return;

} /* emlxs_pkt_complete() */


#ifdef SAN_DIAG_SUPPORT
/*
 * This routine is called with EMLXS_PORT_LOCK held so we can just increment
 * normally. Don't have to use atomic operations.
8769 */ 8770 extern void 8771 emlxs_update_sd_bucket(emlxs_buf_t *sbp) 8772 { 8773 emlxs_port_t *vport; 8774 fc_packet_t *pkt; 8775 uint32_t did; 8776 hrtime_t t; 8777 hrtime_t delta_time; 8778 int i; 8779 NODELIST *ndlp; 8780 8781 vport = sbp->port; 8782 8783 if ((sd_bucket.search_type == 0) || 8784 (vport->sd_io_latency_state != SD_COLLECTING)) 8785 return; 8786 8787 /* Compute the iolatency time in microseconds */ 8788 t = gethrtime(); 8789 delta_time = t - sbp->sd_start_time; 8790 pkt = PRIV2PKT(sbp); 8791 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 8792 ndlp = emlxs_node_find_did(vport, did); 8793 8794 if (ndlp) { 8795 if (delta_time >= 8796 sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) 8797 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1]. 8798 count++; 8799 else if (delta_time <= sd_bucket.values[0]) 8800 ndlp->sd_dev_bucket[0].count++; 8801 else { 8802 for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) { 8803 if ((delta_time > sd_bucket.values[i-1]) && 8804 (delta_time <= sd_bucket.values[i])) { 8805 ndlp->sd_dev_bucket[i].count++; 8806 break; 8807 } 8808 } 8809 } 8810 } 8811 } 8812 #endif /* SAN_DIAG_SUPPORT */ 8813 8814 /*ARGSUSED*/ 8815 static void 8816 emlxs_iodone_server(void *arg1, void *arg2, void *arg3) 8817 { 8818 emlxs_hba_t *hba = (emlxs_hba_t *)arg1; 8819 emlxs_buf_t *sbp; 8820 8821 mutex_enter(&EMLXS_PORT_LOCK); 8822 8823 /* Remove one pkt from the doneq head and complete it */ 8824 while ((sbp = hba->iodone_list) != NULL) { 8825 if ((hba->iodone_list = sbp->next) == NULL) { 8826 hba->iodone_tail = NULL; 8827 hba->iodone_count = 0; 8828 } else { 8829 hba->iodone_count--; 8830 } 8831 8832 mutex_exit(&EMLXS_PORT_LOCK); 8833 8834 /* Prepare the pkt for completion */ 8835 mutex_enter(&sbp->mtx); 8836 sbp->next = NULL; 8837 sbp->pkt_flags &= ~PACKET_IN_DONEQ; 8838 mutex_exit(&sbp->mtx); 8839 8840 /* Complete the IO now */ 8841 emlxs_iodone(sbp); 8842 8843 /* Reacquire lock and check if more work is to be done */ 8844 
mutex_enter(&EMLXS_PORT_LOCK); 8845 } 8846 8847 mutex_exit(&EMLXS_PORT_LOCK); 8848 8849 return; 8850 8851 } /* End emlxs_iodone_server */ 8852 8853 8854 static void 8855 emlxs_iodone(emlxs_buf_t *sbp) 8856 { 8857 fc_packet_t *pkt; 8858 8859 pkt = PRIV2PKT(sbp); 8860 8861 /* Check one more time that the pkt has not already been returned */ 8862 if (sbp->pkt_flags & PACKET_RETURNED) { 8863 return; 8864 } 8865 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8866 emlxs_unswap_pkt(sbp); 8867 #endif /* EMLXS_MODREV2X */ 8868 8869 mutex_enter(&sbp->mtx); 8870 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_RETURNED); 8871 mutex_exit(&sbp->mtx); 8872 8873 if (pkt->pkt_comp) { 8874 (*pkt->pkt_comp) (pkt); 8875 } 8876 8877 return; 8878 8879 } /* emlxs_iodone() */ 8880 8881 8882 8883 extern fc_unsol_buf_t * 8884 emlxs_ub_find(emlxs_port_t *port, uint32_t token) 8885 { 8886 emlxs_unsol_buf_t *pool; 8887 fc_unsol_buf_t *ubp; 8888 emlxs_ub_priv_t *ub_priv; 8889 8890 /* Check if this is a valid ub token */ 8891 if (token < EMLXS_UB_TOKEN_OFFSET) { 8892 return (NULL); 8893 } 8894 8895 mutex_enter(&EMLXS_UB_LOCK); 8896 8897 pool = port->ub_pool; 8898 while (pool) { 8899 /* Find a pool with the proper token range */ 8900 if (token >= pool->pool_first_token && 8901 token <= pool->pool_last_token) { 8902 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token - 8903 pool->pool_first_token)]; 8904 ub_priv = ubp->ub_fca_private; 8905 8906 if (ub_priv->token != token) { 8907 EMLXS_MSGF(EMLXS_CONTEXT, 8908 &emlxs_sfs_debug_msg, 8909 "ub_find: Invalid token=%x", ubp, token, 8910 ub_priv->token); 8911 8912 ubp = NULL; 8913 } 8914 8915 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) { 8916 EMLXS_MSGF(EMLXS_CONTEXT, 8917 &emlxs_sfs_debug_msg, 8918 "ub_find: Buffer not in use. 
buffer=%p " 8919 "token=%x", ubp, token); 8920 8921 ubp = NULL; 8922 } 8923 8924 mutex_exit(&EMLXS_UB_LOCK); 8925 8926 return (ubp); 8927 } 8928 8929 pool = pool->pool_next; 8930 } 8931 8932 mutex_exit(&EMLXS_UB_LOCK); 8933 8934 return (NULL); 8935 8936 } /* emlxs_ub_find() */ 8937 8938 8939 8940 extern fc_unsol_buf_t * 8941 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type, 8942 uint32_t reserve) 8943 { 8944 emlxs_hba_t *hba = HBA; 8945 emlxs_unsol_buf_t *pool; 8946 fc_unsol_buf_t *ubp; 8947 emlxs_ub_priv_t *ub_priv; 8948 uint32_t i; 8949 uint32_t resv_flag; 8950 uint32_t pool_free; 8951 uint32_t pool_free_resv; 8952 8953 mutex_enter(&EMLXS_UB_LOCK); 8954 8955 pool = port->ub_pool; 8956 while (pool) { 8957 /* Find a pool of the appropriate type and size */ 8958 if ((pool->pool_available == 0) || 8959 (pool->pool_type != type) || 8960 (pool->pool_buf_size < size)) { 8961 goto next_pool; 8962 } 8963 8964 8965 /* Adjust free counts based on availablity */ 8966 /* The free reserve count gets first priority */ 8967 pool_free_resv = 8968 min(pool->pool_free_resv, pool->pool_available); 8969 pool_free = 8970 min(pool->pool_free, 8971 (pool->pool_available - pool_free_resv)); 8972 8973 /* Initialize reserve flag */ 8974 resv_flag = reserve; 8975 8976 if (resv_flag) { 8977 if (pool_free_resv == 0) { 8978 if (pool_free == 0) { 8979 goto next_pool; 8980 } 8981 resv_flag = 0; 8982 } 8983 } else if (pool_free == 0) { 8984 goto next_pool; 8985 } 8986 8987 /* Find next available free buffer in this pool */ 8988 for (i = 0; i < pool->pool_nentries; i++) { 8989 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i]; 8990 ub_priv = ubp->ub_fca_private; 8991 8992 if (!ub_priv->available || 8993 ub_priv->flags != EMLXS_UB_FREE) { 8994 continue; 8995 } 8996 8997 ub_priv->time = hba->timer_tics; 8998 8999 /* Timeout in 5 minutes */ 9000 ub_priv->timeout = (5 * 60); 9001 9002 ub_priv->flags = EMLXS_UB_IN_USE; 9003 9004 /* Alloc the buffer from the pool */ 9005 if (resv_flag) { 9006 
				ub_priv->flags |= EMLXS_UB_RESV;
				pool->pool_free_resv--;
			} else {
				pool->pool_free--;
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
			    ub_priv->token, pool->pool_nentries,
			    pool->pool_available, pool->pool_free,
			    pool->pool_free_resv);

			mutex_exit(&EMLXS_UB_LOCK);

			return (ubp);
		}
next_pool:

		pool = pool->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (NULL);

} /* emlxs_ub_get() */



/*
 * emlxs_set_pkt_state()
 *
 * Translate an emlxs I/O status (and, for IOSTAT_LOCAL_REJECT, the local
 * error code) into the fc_packet_t state/reason/explanation/action fields
 * via the emlxs_iostat_tbl/emlxs_ioerr_tbl lookup tables, then fill in
 * residual counts and, for successful FCP commands with no chip-supplied
 * response, fabricate a minimal FCP response frame.  Idempotent: does
 * nothing once PACKET_STATE_VALID is set.  'lock' selects whether sbp->mtx
 * is taken here or already held by the caller.
 */
extern void
emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
    uint32_t lock)
{
	fc_packet_t *pkt;
	fcp_rsp_t *fcp_rsp;
	uint32_t i;
	emlxs_xlat_err_t *tptr;
	emlxs_xlat_err_t *entry;


	pkt = PRIV2PKT(sbp);

	if (lock) {
		mutex_enter(&sbp->mtx);
	}

	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
		sbp->pkt_flags |= PACKET_STATE_VALID;

		/* Perform table lookup */
		entry = NULL;
		if (iostat != IOSTAT_LOCAL_REJECT) {
			tptr = emlxs_iostat_tbl;
			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
				if (iostat == tptr->emlxs_status) {
					entry = tptr;
					break;
				}
			}
		} else {	/* iostate == IOSTAT_LOCAL_REJECT */

			/* Local rejects are refined by the local error code */
			tptr = emlxs_ioerr_tbl;
			for (i = 0; i < IOERR_MAX; i++, tptr++) {
				if (localstat == tptr->emlxs_status) {
					entry = tptr;
					break;
				}
			}
		}

		if (entry) {
			pkt->pkt_state = entry->pkt_state;
			pkt->pkt_reason = entry->pkt_reason;
			pkt->pkt_expln = entry->pkt_expln;
			pkt->pkt_action = entry->pkt_action;
		} else {
			/* Set defaults */
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_ABORTED;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;
		}


		/* Set the residual counts and response frame */
		/* Check if response frame was received from the chip */
		/* If so, then the residual counts will already be set */
		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
			/* We have to create the response frame */
			if (iostat == IOSTAT_SUCCESS) {
				pkt->pkt_resp_resid = 0;
				pkt->pkt_data_resid = 0;

				if ((pkt->pkt_cmd_fhdr.type ==
				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
				    pkt->pkt_resp) {
					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;

					/* Fabricate a good FCP response */
					fcp_rsp->fcp_u.fcp_status.
					    rsp_len_set = 1;
					fcp_rsp->fcp_response_len = 8;
				}
			} else {
				/* Otherwise assume no data */
				/* and no response received */
				pkt->pkt_data_resid = pkt->pkt_datalen;
				pkt->pkt_resp_resid = pkt->pkt_rsplen;
			}
		}
	}

	if (lock) {
		mutex_exit(&sbp->mtx);
	}

	return;

} /* emlxs_set_pkt_state() */


#if (EMLXS_MODREVX == EMLXS_MODREV2X)

/*
 * emlxs_swap_service_params()
 *
 * Byte-swap a login SERV_PARM structure in place: the common service
 * parameters as 16-bit words (with e_d_tov re-swapped as a 32-bit field)
 * followed by each of the four class parameter blocks as 16-bit words.
 */
extern void
emlxs_swap_service_params(SERV_PARM *sp)
{
	uint16_t *p;
	int size;
	int i;

	size = (sizeof (CSP) - 4) / 2;
	p = (uint16_t *)&sp->cmn;
	for (i = 0; i < size; i++) {
		p[i] = SWAP_DATA16(p[i]);
	}
	/* e_d_tov is a 32-bit quantity, not two 16-bit words */
	sp->cmn.e_d_tov = SWAP_DATA32(sp->cmn.e_d_tov);

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls1;
	for (i = 0; i < size; i++, p++) {
		*p = SWAP_DATA16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls2;
	for (i = 0; i < size; i++, p++) {
		*p = SWAP_DATA16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls3;
	for (i = 0; i < size; i++, p++) {
		*p = SWAP_DATA16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls4;
	for (i = 0; i < size; i++, p++) {
		*p = SWAP_DATA16(*p);
	}

	return;

} /* emlxs_swap_service_params() */

/*
 * emlxs_unswap_pkt()
 *
 * Undo a previous endian swap on a packet by re-applying the matching
 * swap routine (the swap routines toggle the *_SWAPPED flag).
 */
extern void
emlxs_unswap_pkt(emlxs_buf_t *sbp)
{
	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
		emlxs_swap_fcp_pkt(sbp);
	}

	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
		emlxs_swap_els_pkt(sbp);
	}

	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
		emlxs_swap_ct_pkt(sbp);
	}

} /* emlxs_unswap_pkt() */


/*
 * emlxs_swap_fcp_pkt()
 *
 * Byte-swap the endian-sensitive fields of an FCP command (and, if a chip
 * response is valid, its response) in place.  Toggles PACKET_FCP_SWAPPED
 * so a second call undoes the first.  Driver-allocated packets
 * (PACKET_ALLOCATED) are never swapped.
 */
extern void
emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	FCP_CMND *cmd;
	fcp_rsp_t *rsp;
	uint16_t *lunp;
	uint32_t i;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swapped flag so unswap re-enters here */
	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
	} else {
		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (FCP_CMND *)pkt->pkt_cmd;
	/* Only swap the response if the chip actually delivered one */
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
	    (fcp_rsp_t *)pkt->pkt_resp : NULL;

	/* The size of data buffer needs to be swapped. */
	cmd->fcpDl = SWAP_DATA32(cmd->fcpDl);

	/*
	 * Swap first 2 words of FCP CMND payload.
	 */
	lunp = (uint16_t *)&cmd->fcpLunMsl;
	for (i = 0; i < 4; i++) {
		lunp[i] = SWAP_DATA16(lunp[i]);
	}

	if (rsp) {
		rsp->fcp_resid = SWAP_DATA32(rsp->fcp_resid);
		rsp->fcp_sense_len = SWAP_DATA32(rsp->fcp_sense_len);
		rsp->fcp_response_len = SWAP_DATA32(rsp->fcp_response_len);
	}

	return;

} /* emlxs_swap_fcp_pkt() */


/*
 * emlxs_swap_els_pkt()
 *
 * Byte-swap the endian-sensitive words of an ELS command/response in
 * place.  Which words need swapping depends on the ELS command code, so
 * the code is read before or after swapping word 0 depending on the
 * current swap direction.  Toggles PACKET_ELS_SWAPPED.
 */
extern void
emlxs_swap_els_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	uint32_t *cmd;
	uint32_t *rsp;
	uint32_t command;
	uint16_t *c;
	uint32_t i;
	uint32_t swapped;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
		swapped = 1;
	} else {
		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
		swapped = 0;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (uint32_t *)pkt->pkt_cmd;
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
	    (uint32_t *)pkt->pkt_resp : NULL;

	/* Read the ELS command code from whichever byte order is native */
	if (!swapped) {
		cmd[0] = SWAP_DATA32(cmd[0]);
		command = cmd[0] & ELS_CMD_MASK;
	} else {
		command = cmd[0] & ELS_CMD_MASK;
		cmd[0] = SWAP_DATA32(cmd[0]);
	}

	if (rsp) {
		rsp[0] = SWAP_DATA32(rsp[0]);
	}

	/* Swap the command-specific payload words */
	switch (command) {
	case ELS_CMD_ACC:
		if (sbp->ucmd == ELS_CMD_ADISC) {
			/* Hard address of originator */
			cmd[1] = SWAP_DATA32(cmd[1]);

			/* N_Port ID of originator */
			cmd[6] = SWAP_DATA32(cmd[6]);
		}
		break;

	case ELS_CMD_PLOGI:
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
		if (rsp) {
			emlxs_swap_service_params((SERV_PARM *) & rsp[1]);
		}
		break;

	case ELS_CMD_RLS:
		cmd[1] = SWAP_DATA32(cmd[1]);

		if (rsp) {
			for (i = 0; i < 6; i++) {
				rsp[1 + i] = SWAP_DATA32(rsp[1 + i]);
			}
		}
		break;

	case ELS_CMD_ADISC:
		cmd[1] = SWAP_DATA32(cmd[1]);	/* Hard address of originator */
		cmd[6] = SWAP_DATA32(cmd[6]);	/* N_Port ID of originator */
		break;

	case ELS_CMD_PRLI:
		c = (uint16_t *)&cmd[1];
		c[1] = SWAP_DATA16(c[1]);

		cmd[4] = SWAP_DATA32(cmd[4]);

		if (rsp) {
			rsp[4] = SWAP_DATA32(rsp[4]);
		}
		break;

	case ELS_CMD_SCR:
		cmd[1] = SWAP_DATA32(cmd[1]);
		break;

	case ELS_CMD_LINIT:
		if (rsp) {
			rsp[1] = SWAP_DATA32(rsp[1]);
		}
		break;

	default:
		break;
	}

	return;

} /* emlxs_swap_els_pkt() */


/*
 * emlxs_swap_ct_pkt()
 *
 * Byte-swap the endian-sensitive words of a CT request/response in place.
 * On the initial swap, word 0 is (re)set to the CT revision value
 * 0x01000000 and the command code is captured from word 2 before the
 * header words are swapped; on unswap, the code is read afterwards.
 * Toggles PACKET_CT_SWAPPED.
 */
extern void
emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	uint32_t *cmd;
	uint32_t *rsp;
	uint32_t command;
	uint32_t i;
	uint32_t swapped;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
		swapped = 1;
	} else {
		sbp->pkt_flags |= PACKET_CT_SWAPPED;
		swapped = 0;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (uint32_t *)pkt->pkt_cmd;
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
	    (uint32_t *)pkt->pkt_resp : NULL;

	if (!swapped) {
		cmd[0] = 0x01000000;
		command = cmd[2];
	}

	cmd[0] = SWAP_DATA32(cmd[0]);
	cmd[1] = SWAP_DATA32(cmd[1]);
	cmd[2] = SWAP_DATA32(cmd[2]);
	cmd[3] = SWAP_DATA32(cmd[3]);

	if (swapped) {
		command = cmd[2];
	}

	/* Upper 16 bits of word 2 hold the CT command code */
	switch ((command >> 16)) {
	case SLI_CTNS_GA_NXT:
		cmd[4] = SWAP_DATA32(cmd[4]);
		break;

	case SLI_CTNS_GPN_ID:
	case SLI_CTNS_GNN_ID:
	case SLI_CTNS_RPN_ID:
	case SLI_CTNS_RNN_ID:
		cmd[4] = SWAP_DATA32(cmd[4]);
		break;

	case SLI_CTNS_RCS_ID:
	case SLI_CTNS_RPT_ID:
		cmd[4] = SWAP_DATA32(cmd[4]);
		cmd[5] = SWAP_DATA32(cmd[5]);
		break;

	case SLI_CTNS_RFT_ID:
		cmd[4] = SWAP_DATA32(cmd[4]);

		/* Swap FC4 types */
		for (i = 0; i < 8; i++) {
			cmd[5 + i] = SWAP_DATA32(cmd[5 + i]);
		}
		break;

	case SLI_CTNS_GFT_ID:
		if (rsp) {
			/* Swap FC4 types */
			for (i = 0; i < 8; i++) {
				rsp[4 + i] = SWAP_DATA32(rsp[4 + i]);
			}
		}
		break;

	case SLI_CTNS_GCS_ID:
	case SLI_CTNS_GSPN_ID:
	case SLI_CTNS_GSNN_NN:
	case SLI_CTNS_GIP_NN:
	case SLI_CTNS_GIPA_NN:

	case SLI_CTNS_GPT_ID:
	case SLI_CTNS_GID_NN:
	case SLI_CTNS_GNN_IP:
	case SLI_CTNS_GIPA_IP:
	case SLI_CTNS_GID_FT:
	case SLI_CTNS_GID_PT:
	case SLI_CTNS_GID_PN:
	case SLI_CTNS_RSPN_ID:
	case SLI_CTNS_RIP_NN:
	case SLI_CTNS_RIPA_NN:
	case SLI_CTNS_RSNN_NN:
	case SLI_CTNS_DA_ID:
	case SLI_CT_RESPONSE_FS_RJT:
	case SLI_CT_RESPONSE_FS_ACC:

	default:
		break;
	}
	return;

} /* emlxs_swap_ct_pkt() */

/*
 * emlxs_swap_els_ub()
 *
 * Byte-swap an unsolicited ELS buffer in place, keyed by the received ELS
 * command code: RSCN payloads word-by-word, login-type payloads via the
 * service-parameter swapper.  Other commands are left for the ULP.
 */
extern void
emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
{
	emlxs_ub_priv_t *ub_priv;
	fc_rscn_t *rscn;
	uint32_t count;
	uint32_t i;
	uint32_t *lp;
	la_els_logi_t *logi;

	ub_priv = ubp->ub_fca_private;

	switch (ub_priv->cmd) {
	case ELS_CMD_RSCN:
		rscn = (fc_rscn_t *)ubp->ub_buffer;

		rscn->rscn_payload_len = SWAP_DATA16(rscn->rscn_payload_len);

		/* Payload length includes the 4-byte header word */
		count = ((rscn->rscn_payload_len - 4) / 4);
		lp = (uint32_t *)ubp->ub_buffer + 1;
		for (i = 0; i < count; i++, lp++) {
			*lp = SWAP_DATA32(*lp);
		}

		break;

	case ELS_CMD_FLOGI:
	case ELS_CMD_PLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PDISC:
		logi = (la_els_logi_t *)ubp->ub_buffer;
		emlxs_swap_service_params(
		    (SERV_PARM *)&logi->common_service);
		break;

	/* ULP handles this */
	case ELS_CMD_LOGO:
	case ELS_CMD_PRLI:
	case ELS_CMD_PRLO:
	case ELS_CMD_ADISC:
	default:
		break;
	}

	return;

} /* emlxs_swap_els_ub() */


#endif /* EMLXS_MODREV2X */


/*
 * emlxs_elscmd_xlate()
 * Translate an ELS command code to a display string.
 * NOTE(review): falls back to a static buffer shared by all callers --
 * not reentrant; presumably acceptable for debug tracing only.
 */
extern char *
emlxs_elscmd_xlate(uint32_t elscmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (elscmd == emlxs_elscmd_table[i].code) {
			return (emlxs_elscmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "ELS=0x%x", elscmd);
	return (buffer);

} /* emlxs_elscmd_xlate() */


/*
 * emlxs_ctcmd_xlate()
 * Translate a name-server CT command code to a display string.
 * Shares the static-buffer caveat noted for emlxs_elscmd_xlate().
 */
extern char *
emlxs_ctcmd_xlate(uint32_t ctcmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (ctcmd == emlxs_ctcmd_table[i].code) {
			return (emlxs_ctcmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
	return (buffer);

} /* emlxs_ctcmd_xlate() */


#ifdef MENLO_SUPPORT
/*
 * emlxs_menlo_cmd_xlate()
 * Translate a Menlo command code to a display string.
 */
extern char *
emlxs_menlo_cmd_xlate(uint32_t cmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (cmd == emlxs_menlo_cmd_table[i].code) {
			return (emlxs_menlo_cmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "Cmd=0x%x", cmd);
	return (buffer);

} /* emlxs_menlo_cmd_xlate() */

/*
 * emlxs_menlo_rsp_xlate()
 * Translate a Menlo response code to a display string.
 */
extern char *
emlxs_menlo_rsp_xlate(uint32_t rsp)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (rsp == emlxs_menlo_rsp_table[i].code) {
			return (emlxs_menlo_rsp_table[i].string);
		}
	}

	(void) sprintf(buffer, "Rsp=0x%x", rsp);
	return (buffer);

} /* emlxs_menlo_rsp_xlate() */

#endif /* MENLO_SUPPORT */


/*
 * emlxs_rmcmd_xlate()
 * Translate a remote-management CT command code to a display string.
 */
extern char *
emlxs_rmcmd_xlate(uint32_t rmcmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (rmcmd == emlxs_rmcmd_table[i].code) {
			return (emlxs_rmcmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "RM=0x%x", rmcmd);
	return (buffer);

} /* emlxs_rmcmd_xlate() */



/*
 * emlxs_mscmd_xlate()
 * Translate a management-server (FDMI) command code to a display string.
 */
extern char *
emlxs_mscmd_xlate(uint16_t mscmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (mscmd == emlxs_mscmd_table[i].code) {
			return (emlxs_mscmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
	return (buffer);

} /* emlxs_mscmd_xlate() */


/*
 * emlxs_state_xlate()
 * Translate a driver state code to a display string.
 */
extern char *
emlxs_state_xlate(uint8_t state)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (state == emlxs_state_table[i].code) {
			return (emlxs_state_table[i].string);
		}
	}

	(void) sprintf(buffer, "State=0x%x", state);
	return (buffer);

} /* emlxs_state_xlate() */


/*
 * emlxs_error_xlate()
 * Translate a driver error code to a display string.
 * NOTE(review): the parameter is named 'errno', which collides with the
 * C library macro in user context; harmless in kernel code but worth
 * confirming.
 */
extern char *
emlxs_error_xlate(uint8_t errno)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (errno == emlxs_error_table[i].code) {
			return (emlxs_error_table[i].string);
		}
	}

	(void) sprintf(buffer, "Errno=0x%x", errno);
	return (buffer);

} /* emlxs_error_xlate() */


/*
 * emlxs_pm_lower_power()
 *
 * Lower the adapter power level, either through the kernel power
 * management framework (when CFG_PM_SUPPORT is enabled) or by calling the
 * driver's emlxs_power() entry point directly.
 */
static int
emlxs_pm_lower_power(dev_info_t *dip)
{
	int ddiinst;
	int emlxinst;
	emlxs_config_t *cfg;
	int32_t rval;
	emlxs_hba_t *hba;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];
	cfg = &CFG;

	rval = DDI_SUCCESS;

	/* Lower the power level */
	if (cfg[CFG_PM_SUPPORT].current) {
		rval =
		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
		    EMLXS_PM_ADAPTER_DOWN);
	} else {
		/* We do not have kernel support of power management enabled */
		/* therefore, call our power management routine directly */
		rval =
		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
	}

	return (rval);

} /* emlxs_pm_lower_power() */


/*
 * emlxs_pm_raise_power()
 *
 * Raise the adapter power level; counterpart of emlxs_pm_lower_power().
 */
static int
emlxs_pm_raise_power(dev_info_t *dip)
{
	int ddiinst;
	int emlxinst;
	emlxs_config_t *cfg;
	int32_t rval;
	emlxs_hba_t *hba;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];
	cfg = &CFG;

	/* Raise the power level */
	if (cfg[CFG_PM_SUPPORT].current) {
		rval =
		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
		    EMLXS_PM_ADAPTER_UP);
	} else {
		/* We do not have kernel support of power management enabled */
		/* therefore, call our power management routine directly */
		rval =
		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
	}

	return (rval);

} /* emlxs_pm_raise_power() */


#ifdef IDLE_TIMER

/*
 * emlxs_pm_busy_component()
 *
 * Mark the adapter busy.  Uses an unlocked fast-path check of pm_busy,
 * re-checked under pm_lock, then notifies the kernel PM framework when
 * enabled, rolling pm_busy back if that notification fails.
 * NOTE(review): 'dip' here is presumably a macro resolving to hba->dip --
 * confirm in the driver headers.
 */
extern int
emlxs_pm_busy_component(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	int rval;

	hba->pm_active = 1;

	/* Fast path: already marked busy */
	if (hba->pm_busy) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&hba->pm_lock);

	/* Re-check under the lock */
	if (hba->pm_busy) {
		mutex_exit(&hba->pm_lock);
		return (DDI_SUCCESS);
	}
	hba->pm_busy = 1;

	mutex_exit(&hba->pm_lock);

	/* Attempt to notify system that we are busy */
	if (cfg[CFG_PM_SUPPORT].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "pm_busy_component.");

		rval = pm_busy_component(dip, EMLXS_PM_ADAPTER);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "pm_busy_component failed. ret=%d", rval);

			/* If this attempt failed then clear our flags */
			mutex_enter(&hba->pm_lock);
			hba->pm_busy = 0;
			mutex_exit(&hba->pm_lock);

			return (rval);
		}
	}

	return (DDI_SUCCESS);

} /* emlxs_pm_busy_component() */


/*
 * emlxs_pm_idle_component()
 *
 * Mark the adapter idle; mirror image of emlxs_pm_busy_component(),
 * restoring pm_busy if the kernel PM notification fails.
 */
extern int
emlxs_pm_idle_component(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	int rval;

	/* Fast path: already idle */
	if (!hba->pm_busy) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&hba->pm_lock);

	/* Re-check under the lock */
	if (!hba->pm_busy) {
		mutex_exit(&hba->pm_lock);
		return (DDI_SUCCESS);
	}
	hba->pm_busy = 0;

	mutex_exit(&hba->pm_lock);

	if (cfg[CFG_PM_SUPPORT].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "pm_idle_component.");

		rval = pm_idle_component(dip, EMLXS_PM_ADAPTER);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "pm_idle_component failed. ret=%d", rval);

			/* If this attempt failed then */
			/* reset our flags for another attempt */
			mutex_enter(&hba->pm_lock);
			hba->pm_busy = 1;
			mutex_exit(&hba->pm_lock);

			return (rval);
		}
	}

	return (DDI_SUCCESS);

} /* emlxs_pm_idle_component() */


/*
 * emlxs_pm_idle_timer()
 *
 * Periodic timer hook: while activity is seen (pm_active), keep pushing
 * the idle deadline out by CFG_PM_IDLE ticks; once the deadline passes
 * with no activity, declare the component idle.
 */
extern void
emlxs_pm_idle_timer(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;

	if (hba->pm_active) {
		/* Clear active flag and reset idle timer */
		mutex_enter(&hba->pm_lock);
		hba->pm_active = 0;
		hba->pm_idle_timer =
		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
		mutex_exit(&hba->pm_lock);
	}

	/* Check for idle timeout */
	else if (hba->timer_tics >= hba->pm_idle_timer) {
		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
			mutex_enter(&hba->pm_lock);
			hba->pm_idle_timer =
			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
			mutex_exit(&hba->pm_lock);
		}
	}

	return;

} /*
emlxs_pm_idle_timer() */

#endif /* IDLE_TIMER */


#ifdef SLI3_SUPPORT
/*
 * emlxs_read_vport_prop() - Read virtual-port (vport) definitions from
 * the driver configuration properties and configure the matching ports.
 *
 * Looks up the per-adapter "<driver><inst>-vport" string-array property
 * first, falling back to the global "vport" property.  Each entry is
 * expected in the form:
 *
 *	<pwwpn>:<wwnn>:<wwpn>:<vpi>
 *
 * where pwwpn/wwnn/wwpn are 16 hex digits each and vpi is decimal.
 * Entries whose pwwpn does not match this adapter's physical WWPN,
 * whose vpi is out of range, or whose port is already configured are
 * skipped.  Matching entries get their WWNN/WWPN installed and the
 * port flagged CONFIG|ENABLE (plus RESTRICTED when configured).
 */
static void
emlxs_read_vport_prop(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	char **arrayp;
	uint8_t *s;		/* scan pointer into the current entry */
	uint8_t *np;		/* fill pointer into the current NAME_TYPE */
	NAME_TYPE pwwpn;	/* physical port WWPN (entry key) */
	NAME_TYPE wwnn;		/* vport node WWN */
	NAME_TYPE wwpn;		/* vport port WWPN */
	uint32_t vpi;
	uint32_t cnt;
	uint32_t rval;
	uint32_t i;
	uint32_t j;
	uint32_t c1;
	uint32_t sum;
	uint32_t errors;
	char buffer[64];

	/* Check for the per adapter vport setting */
	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
	cnt = 0;
	arrayp = NULL;
	rval =
	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);

	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
		/* Check for the global vport setting */
		cnt = 0;
		arrayp = NULL;
		rval =
		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
	}

	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
		return;
	}

	for (i = 0; i < cnt; i++) {
		errors = 0;
		s = (uint8_t *)arrayp[i];

		if (!s) {
			break;
		}

		/*
		 * Parse the physical-port WWPN: two hex nibbles per byte.
		 * Invalid digits are logged and counted; the byte is still
		 * stored and the whole entry is discarded later if
		 * errors != 0.
		 */
		np = (uint8_t *)&pwwpn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid PWWPN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);
				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid PWWPN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);
				errors++;
			}

			*np++ = sum;
		}

		/* A ':' delimiter must follow the PWWPN */
		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after PWWPN. "
			    "entry=%d", i);
			goto out;
		}

		/* Parse the vport node WWN (same two-nibble scheme) */
		np = (uint8_t *)&wwnn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWNN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);
				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWNN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);
				errors++;
			}

			*np++ = sum;
		}

		/* A ':' delimiter must follow the WWNN */
		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after WWNN. "
			    "entry=%d", i);
			goto out;
		}

		/* Parse the vport WWPN (same two-nibble scheme) */
		np = (uint8_t *)&wwpn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWPN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);

				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWPN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);

				errors++;
			}

			*np++ = sum;
		}

		/* A ':' delimiter must follow the WWPN */
		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after WWPN. "
			    "entry=%d", i);

			goto out;
		}

		/* Parse the decimal VPI; any non-digit aborts processing */
		sum = 0;
		do {
			c1 = *s++;
			if ((c1 < '0') || (c1 > '9')) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid VPI found. "
				    "entry=%d c=%c vpi=%d", i, c1, sum);

				goto out;
			}

			sum = (sum * 10) + (c1 - '0');

		} while (*s != 0);

		vpi = sum;

		/* Discard entries that had invalid hex digits */
		if (errors) {
			continue;
		}

		/* Entry has been read */

		/* Check if the physical port wwpn */
		/* matches our physical port wwpn */
		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
			continue;
		}

		/* Check vpi range */
		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
			continue;
		}

		/* Check if port has already been configured */
		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
			continue;
		}

		/* Set the highest configured vpi */
		if (vpi >= hba->vpi_high) {
			hba->vpi_high = vpi;
		}

		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
		    sizeof (NAME_TYPE));

		/* Default the symbolic node name if not already set */
		if (hba->port[vpi].snn[0] == 0) {
			(void) strncpy((caddr_t)hba->port[vpi].snn,
			    (caddr_t)hba->snn, 256);
		}

		/* Default the symbolic port name if not already set */
		if (hba->port[vpi].spn[0] == 0) {
			(void) sprintf((caddr_t)hba->port[vpi].spn,
			    "%s VPort-%d",
			    (caddr_t)hba->spn, vpi);
		}

		hba->port[vpi].flag |=
		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);

#ifdef NPIV_SUPPORT
		if (cfg[CFG_VPORT_RESTRICTED].current) {
			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
		}
#endif /* NPIV_SUPPORT */
	}

out:

	(void) ddi_prop_free((void *) arrayp);
	return;

} /* emlxs_read_vport_prop() */

#endif /* SLI3_SUPPORT */



/*
 * emlxs_wwn_xlate() - Format an 8-byte WWN as 16 lowercase hex digits
 * into the caller-supplied buffer (must hold at least 17 bytes).
 * Returns the buffer for call-chaining convenience.
 */
extern char *
emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
{
	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);

	return (buffer);

} /* emlxs_wwn_xlate() */


/*
 * emlxs_ub_flush() - Drain the port's unsolicited-buffer wait queue.
 *
 * This is called at port online and offline.  The whole wait list is
 * detached under EMLXS_PORT_LOCK, then each buffer is either delivered
 * to the ULP's unsolicited callback (if the ULP is online and has one)
 * or released back to the driver.
 */
extern void
emlxs_ub_flush(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	emlxs_ub_priv_t *next;

	/* Return if nothing to do */
	if (!port->ub_wait_head) {
		return;
	}

	/* Detach the entire wait list while holding the port lock */
	mutex_enter(&EMLXS_PORT_LOCK);
	ub_priv = port->ub_wait_head;
	port->ub_wait_head = NULL;
	port->ub_wait_tail = NULL;
	mutex_exit(&EMLXS_PORT_LOCK);

	while (ub_priv) {
		next = ub_priv->next;
		ubp = ub_priv->ubp;

		/* Check if ULP is online and we have a callback function */
		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
		    port->ulp_unsol_cb) {
			/* Send ULP the ub buffer */
			port->ulp_unsol_cb(port->ulp_handle, ubp,
			    ubp->ub_frame.type);
		} else {	/* Drop the buffer */

			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}

		ub_priv = next;

	} /* while() */

	return;

} /* emlxs_ub_flush() */


/*
 * emlxs_ub_callback() - Deliver one unsolicited buffer to the ULP.
 *
 * If the ULP is online the buffer is passed to its unsolicited callback
 * (or released when no callback is registered).  If the ULP is offline
 * but the link is up, the buffer is queued on the port's wait list for
 * emlxs_ub_flush() to deliver later; otherwise it is released.
 *
 * NOTE(review): ub_priv->next is not reset to NULL before the buffer is
 * linked at the queue tail here — presumably it is cleared elsewhere in
 * the buffer lifecycle; verify against the allocation/release paths.
 */
extern void
emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_ub_priv_t *ub_priv;

	ub_priv = ubp->ub_fca_private;

	/* Check if ULP is online */
	if (port->ulp_statec != FC_STATE_OFFLINE) {
		if (port->ulp_unsol_cb) {
			port->ulp_unsol_cb(port->ulp_handle, ubp,
			    ubp->ub_frame.type);
		} else {
			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}

		return;
	} else {	/* ULP offline */

		if (hba->state >= FC_LINK_UP) {
			/* Add buffer to queue tail */
			mutex_enter(&EMLXS_PORT_LOCK);

			if (port->ub_wait_tail) {
				port->ub_wait_tail->next = ub_priv;
			}
			port->ub_wait_tail = ub_priv;

			if (!port->ub_wait_head) {
				port->ub_wait_head = ub_priv;
			}

			mutex_exit(&EMLXS_PORT_LOCK);
		} else {
			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}
	}

	return;

} /* emlxs_ub_callback() */


/*
 * emlxs_integrity_check() - Sanity-check compiled structure sizes.
 *
 * Verifies at runtime that the sizes of the hardware/firmware interface
 * structures match the values the SLI interface requires (a mismatch
 * indicates a compiler packing/alignment problem).  Each mismatch is
 * logged via cmn_err.  Returns the number of mismatches found (0 = OK).
 */
static uint32_t
emlxs_integrity_check(emlxs_hba_t *hba)
{
	uint32_t size;		/* expected size for the current check */
	uint32_t errors = 0;
	int ddiinst = hba->ddiinst;

	size = 16;
	if (sizeof (ULP_BDL) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));

		errors++;
	}
	size = 8;
	if (sizeof (ULP_BDE) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));

		errors++;
	}
	size = 12;
	if (sizeof (ULP_BDE64) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));

		errors++;
	}
	size = 16;
	if (sizeof (HBQE_t) != size) {
		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16",
		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));

		errors++;
	}
	size = 8;
	if (sizeof (HGP) != size) {
		cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));

		errors++;
	}
	if (sizeof (PGP) != size) {
		cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));

		errors++;
	}
	size = 4;
	if (sizeof (WORD5) != size) {
		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4",
		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));

		errors++;
	}
	size = 124;
	if (sizeof (MAILVARIANTS) != size) {
		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. "
		    "%d != 124", DRIVER_NAME, ddiinst,
		    (int)sizeof (MAILVARIANTS));

		errors++;
	}
	size = 128;
	if (sizeof (SLI1_DESC) != size) {
		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));

		errors++;
	}
	if (sizeof (SLI2_DESC) != size) {
		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));

		errors++;
	}
	size = MBOX_SIZE;
	if (sizeof (MAILBOX) != size) {
		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);

		errors++;
	}
	size = PCB_SIZE;
	if (sizeof (PCB) != size) {
		cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);

		errors++;
	}
	size = 260;
	if (sizeof (ATTRIBUTE_ENTRY) != size) {
		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. "
		    "%d != 260", DRIVER_NAME, ddiinst,
		    (int)sizeof (ATTRIBUTE_ENTRY));

		errors++;
	}
	size = SLI_SLIM1_SIZE;
	if (sizeof (SLIM1) != size) {
		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);

		errors++;
	}
#ifdef SLI3_SUPPORT
	size = SLI3_IOCB_CMD_SIZE;
	if (sizeof (IOCB) != size) {
		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
		    SLI3_IOCB_CMD_SIZE);

		errors++;
	}
#else
	size = SLI2_IOCB_CMD_SIZE;
	if (sizeof (IOCB) != size) {
		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
		    SLI2_IOCB_CMD_SIZE);

		errors++;
	}
#endif /* SLI3_SUPPORT */

	size = SLI_SLIM2_SIZE;
	if (sizeof (SLIM2) != size) {
		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
		    SLI_SLIM2_SIZE);

		errors++;
	}
	return (errors);

} /* emlxs_integrity_check() */


#ifdef FMA_SUPPORT
/*
 * FMA support
 */

/*
 * emlxs_fm_init() - Initialize fault management for this instance.
 *
 * When FM capabilities are configured (hba->fm_caps), switches the
 * access/DMA attributes to flag-on-error mode, registers with the DDI
 * FM framework, sets up PCI ereport support, and registers the error
 * callback when those capabilities are present.
 */
extern void
emlxs_fm_init(emlxs_hba_t *hba)
{
	ddi_iblock_cookie_t iblk;

	if (hba->fm_caps) {
		/* Enable error-flagging on all handles before ddi_fm_init */
		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		emlxs_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		emlxs_dma_attr_ro.dma_attr_flags = DDI_DMA_FLAGERR;
		emlxs_dma_attr_1sg.dma_attr_flags = DDI_DMA_FLAGERR;
		emlxs_dma_attr_fcip_rsp.dma_attr_flags = DDI_DMA_FLAGERR;

		ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);

		if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
		    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
			pci_ereport_setup(hba->dip);
		}

		if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
			ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
			    (void *)hba);
		}
	}
} /* emlxs_fm_init() */


/*
 * emlxs_fm_fini() - Tear down fault management for this instance.
 * Reverses the registrations made by emlxs_fm_init(), in reverse order.
 */
extern void
emlxs_fm_fini(emlxs_hba_t *hba)
{
	if (hba->fm_caps) {
		if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
		    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
			pci_ereport_teardown(hba->dip);
		}

		if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
			ddi_fm_handler_unregister(hba->dip);
		}

		(void) ddi_fm_fini(hba->dip);
	}
} /* emlxs_fm_fini() */


/*
 * emlxs_fm_check_acc_handle() - Query (and clear) the FM error status
 * of a register access handle.  Returns DDI_FM_OK or the fme_status.
 */
int
emlxs_fm_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t fe;
	int rval = DDI_FM_OK;

	/* Some S10 versions do not define the ahi_err structure */
	if (((ddi_acc_impl_t *)handle)->ahi_err
!= NULL) {
		(void) ddi_fm_acc_err_get(handle, &fe, DDI_FME_VERSION);

		/*
		 * Some S10 versions do not define the
		 * ddi_fm_acc_err_clear function
		 */
		if ((void *)&ddi_fm_acc_err_clear != NULL) {
			(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
		}

		rval = fe.fme_status;
	}

	return (rval);

} /* emlxs_fm_check_acc_handle() */


/*
 * emlxs_fm_check_dma_handle() - Query the FM error status of a DMA
 * handle.  Returns the handle's fme_status (DDI_FM_OK when healthy).
 */
int
emlxs_fm_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t fe;

	(void) ddi_fm_dma_err_get(handle, &fe, DDI_FME_VERSION);

	return (fe.fme_status);

} /* emlxs_fm_check_dma_handle() */


/*
 * emlxs_fm_ereport() - Post a device ereport of class
 * DDI_FM_DEVICE.<detail> with a fresh ENA, if ereports are capable.
 */
void
emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(hba->fm_caps)) {
		(void) ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
} /* emlxs_fm_ereport() */

/*
 * The I/O fault service error handling callback function
 */
/*ARGSUSED*/
extern int
emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data)
{
	/*
	 * as the driver can always deal with an error
	 * in any dma or access handle, we can just return
	 * the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
} /* emlxs_fm_error_cb() */
#endif /* FMA_SUPPORT */