xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli3.c (revision 088c6f3f90c806c9ed1bdffa1b625233a27eb084)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2020 RackTop Systems, Inc.
26  */
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI3_C);
32 
33 static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
34 static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
35 static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
36 	uint32_t ha_copy);
37 #ifdef SFCT_SUPPORT
38 static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
39 #endif /* SFCT_SUPPORT */
40 
41 static uint32_t	emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
42 
/*
 * NOTE(review): non-zero appears to disable the driver's "traffic cop"
 * IOCB scheduling optimization; the consumers of this flag are elsewhere
 * in this file (not visible in this chunk) -- confirm before changing
 * the default.
 */
43 static uint32_t emlxs_disable_traffic_cop = 1;
44 
45 static int			emlxs_sli3_map_hdw(emlxs_hba_t *hba);
46 
47 static void			emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);
48 
49 static int32_t			emlxs_sli3_online(emlxs_hba_t *hba);
50 
51 static void			emlxs_sli3_offline(emlxs_hba_t *hba,
52 					uint32_t reset_requested);
53 
54 static uint32_t			emlxs_sli3_hba_reset(emlxs_hba_t *hba,
55 					uint32_t restart, uint32_t skip_post,
56 					uint32_t quiesce);
57 
58 static void			emlxs_sli3_hba_kill(emlxs_hba_t *hba);
59 static void			emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
60 static uint32_t			emlxs_sli3_hba_init(emlxs_hba_t *hba);
61 
62 static uint32_t			emlxs_sli2_bde_setup(emlxs_port_t *port,
63 					emlxs_buf_t *sbp);
64 static uint32_t			emlxs_sli3_bde_setup(emlxs_port_t *port,
65 					emlxs_buf_t *sbp);
66 static uint32_t			emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
67 					emlxs_buf_t *sbp);
68 static uint32_t			emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
69 					emlxs_buf_t *sbp);
70 
71 
72 static void			emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
73 					CHANNEL *rp, IOCBQ *iocb_cmd);
74 
75 
76 static uint32_t			emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
77 					MAILBOXQ *mbq, int32_t flg,
78 					uint32_t tmo);
79 
80 
81 #ifdef SFCT_SUPPORT
82 static uint32_t			emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
83 					emlxs_buf_t *cmd_sbp, int channel);
84 
85 #endif /* SFCT_SUPPORT */
86 
87 static uint32_t			emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
88 					emlxs_buf_t *sbp, int ring);
89 
90 static uint32_t			emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
91 					emlxs_buf_t *sbp);
92 
93 static uint32_t			emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
94 					emlxs_buf_t *sbp);
95 
96 
97 static uint32_t			emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
98 					emlxs_buf_t *sbp);
99 
100 
101 static void			emlxs_sli3_poll_intr(emlxs_hba_t *hba);
102 
103 static int32_t			emlxs_sli3_intx_intr(char *arg);
104 #ifdef MSI_SUPPORT
105 static uint32_t			emlxs_sli3_msi_intr(char *arg1, char *arg2);
106 #endif /* MSI_SUPPORT */
107 
108 static void			emlxs_sli3_enable_intr(emlxs_hba_t *hba);
109 
110 static void			emlxs_sli3_disable_intr(emlxs_hba_t *hba,
111 					uint32_t att);
112 
113 
114 static void			emlxs_handle_ff_error(emlxs_hba_t *hba);
115 
116 static uint32_t			emlxs_handle_mb_event(emlxs_hba_t *hba);
117 
118 static void			emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);
119 
120 static uint32_t			emlxs_mb_config_port(emlxs_hba_t *hba,
121 					MAILBOXQ *mbq, uint32_t sli_mode,
122 					uint32_t hbainit);
123 static void			emlxs_enable_latt(emlxs_hba_t *hba);
124 
125 static uint32_t			emlxs_check_attention(emlxs_hba_t *hba);
126 
127 static uint32_t			emlxs_get_attention(emlxs_hba_t *hba,
128 					int32_t msgid);
129 static void			emlxs_proc_attention(emlxs_hba_t *hba,
130 					uint32_t ha_copy);
131 /* static int			emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
132 					/* CHANNEL *cp, IOCBQ *iocbq); */
133 /* static void			emlxs_update_HBQ_index(emlxs_hba_t *hba, */
134 					/* uint32_t hbq_id); */
135 /* static void			emlxs_hbq_free_all(emlxs_hba_t *hba, */
136 					/* uint32_t hbq_id); */
137 static uint32_t			emlxs_hbq_setup(emlxs_hba_t *hba,
138 					uint32_t hbq_id);
139 static void			emlxs_sli3_timer(emlxs_hba_t *hba);
140 
141 static void			emlxs_sli3_poll_erratt(emlxs_hba_t *hba);
142 
143 static uint32_t			emlxs_sli3_reg_did(emlxs_port_t *port,
144 					uint32_t did, SERV_PARM *param,
145 					emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
146 					IOCBQ *iocbq);
147 
148 static uint32_t			emlxs_sli3_unreg_node(emlxs_port_t *port,
149 					NODELIST *node, emlxs_buf_t *sbp,
150 					fc_unsol_buf_t *ubp, IOCBQ *iocbq);
151 
152 
153 /* Define SLI3 API functions */
/*
 * SLI3 implementation of the emlxs SLI dispatch table.
 *
 * Initialization is positional: the entry order below must match the
 * member order of emlxs_sli_api_t (declared in a shared header outside
 * this file) -- do not reorder entries without checking that header.
 * The SFCT target-mode prep entry compiles to NULL when SFCT_SUPPORT
 * is not defined.
 *
 * NOTE(review): emlxs_sli3_msi_intr is declared above only under
 * #ifdef MSI_SUPPORT, yet it is referenced here unconditionally.  A
 * build without MSI_SUPPORT would presumably fail to link/compile --
 * confirm whether MSI_SUPPORT is effectively always defined for this
 * platform, or whether this slot should be guarded like the
 * SFCT_SUPPORT entry.
 */
154 emlxs_sli_api_t emlxs_sli3_api = {
155 	emlxs_sli3_map_hdw,
156 	emlxs_sli3_unmap_hdw,
157 	emlxs_sli3_online,
158 	emlxs_sli3_offline,
159 	emlxs_sli3_hba_reset,
160 	emlxs_sli3_hba_kill,
161 	emlxs_sli3_issue_iocb_cmd,
162 	emlxs_sli3_issue_mbox_cmd,
163 #ifdef SFCT_SUPPORT
164 	emlxs_sli3_prep_fct_iocb,
165 #else
	/* No target-mode support: slot is NULL and must not be called */
166 	NULL,
167 #endif /* SFCT_SUPPORT */
168 	emlxs_sli3_prep_fcp_iocb,
169 	emlxs_sli3_prep_ip_iocb,
170 	emlxs_sli3_prep_els_iocb,
171 	emlxs_sli3_prep_ct_iocb,
172 	emlxs_sli3_poll_intr,
173 	emlxs_sli3_intx_intr,
174 	emlxs_sli3_msi_intr,
175 	emlxs_sli3_disable_intr,
176 	emlxs_sli3_timer,
177 	emlxs_sli3_poll_erratt,
178 	emlxs_sli3_reg_did,
179 	emlxs_sli3_unreg_node
180 };
181 
182 
183 /*
184  * emlxs_sli3_online()
185  *
186  * This routine will start initialization of the SLI2/3 HBA.
187  */
188 static int32_t
emlxs_sli3_online(emlxs_hba_t * hba)189 emlxs_sli3_online(emlxs_hba_t *hba)
190 {
191 	emlxs_port_t *port = &PPORT;
192 	emlxs_config_t *cfg;
193 	emlxs_vpd_t *vpd;
194 	MAILBOX *mb = NULL;
195 	MAILBOXQ *mbq = NULL;
196 	RING *rp;
197 	CHANNEL *cp;
198 	MATCHMAP *mp = NULL;
199 	MATCHMAP *mp1 = NULL;
200 	uint8_t *inptr;
201 	uint8_t *outptr;
202 	uint32_t status;
203 	uint16_t i;
204 	uint32_t j;
205 	uint32_t read_rev_reset;
206 	uint32_t key = 0;
207 	uint32_t fw_check;
208 	uint32_t kern_update = 0;
209 	uint32_t rval = 0;
210 	uint32_t offset;
211 	uint8_t vpd_data[DMP_VPD_SIZE];
212 	uint32_t MaxRbusSize;
213 	uint32_t MaxIbusSize;
214 	uint32_t sli_mode;
215 	uint32_t sli_mode_mask;
216 
217 	cfg = &CFG;
218 	vpd = &VPD;
219 	MaxRbusSize = 0;
220 	MaxIbusSize = 0;
221 	read_rev_reset = 0;
222 	hba->chan_count = MAX_RINGS;
223 
224 	if (hba->bus_type == SBUS_FC) {
225 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
226 	}
227 
228 	/* Set the fw_check flag */
229 	fw_check = cfg[CFG_FW_CHECK].current;
230 
231 	if ((fw_check & 0x04) ||
232 	    (hba->fw_flag & FW_UPDATE_KERNEL)) {
233 		kern_update = 1;
234 	}
235 
236 	hba->mbox_queue_flag = 0;
237 	hba->sli.sli3.hc_copy = 0;
238 	hba->fc_edtov = FF_DEF_EDTOV;
239 	hba->fc_ratov = FF_DEF_RATOV;
240 	hba->fc_altov = FF_DEF_ALTOV;
241 	hba->fc_arbtov = FF_DEF_ARBTOV;
242 
243 	/*
244 	 * Get a buffer which will be used repeatedly for mailbox commands
245 	 */
246 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
247 
248 	mb = (MAILBOX *)mbq;
249 
250 	/* Initialize sli mode based on configuration parameter */
251 	switch (cfg[CFG_SLI_MODE].current) {
252 	case 2:	/* SLI2 mode */
253 		sli_mode = EMLXS_HBA_SLI2_MODE;
254 		sli_mode_mask = EMLXS_SLI2_MASK;
255 		break;
256 
257 	case 3:	/* SLI3 mode */
258 		sli_mode = EMLXS_HBA_SLI3_MODE;
259 		sli_mode_mask = EMLXS_SLI3_MASK;
260 		break;
261 
262 	case 0:	/* Best available */
263 	case 1:	/* Best available */
264 	default:
265 		if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
266 			sli_mode = EMLXS_HBA_SLI3_MODE;
267 			sli_mode_mask = EMLXS_SLI3_MASK;
268 		} else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
269 			sli_mode = EMLXS_HBA_SLI2_MODE;
270 			sli_mode_mask = EMLXS_SLI2_MASK;
271 		} else {
272 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
273 			    "No SLI mode available.");
274 			rval = EIO;
275 			goto failed;
276 		}
277 		break;
278 	}
279 	/* SBUS adapters only available in SLI2 */
280 	if (hba->bus_type == SBUS_FC) {
281 		sli_mode = EMLXS_HBA_SLI2_MODE;
282 		sli_mode_mask = EMLXS_SLI2_MASK;
283 	}
284 
285 reset:
286 	/* Reset & Initialize the adapter */
287 	if (emlxs_sli3_hba_init(hba)) {
288 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
289 		    "Unable to init hba.");
290 
291 		rval = EIO;
292 		goto failed;
293 	}
294 
295 #ifdef FMA_SUPPORT
296 	/* Access handle validation */
297 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
298 	    != DDI_FM_OK) ||
299 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
300 	    != DDI_FM_OK) ||
301 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
302 	    != DDI_FM_OK)) {
303 		EMLXS_MSGF(EMLXS_CONTEXT,
304 		    &emlxs_invalid_access_handle_msg, NULL);
305 
306 		rval = EIO;
307 		goto failed;
308 	}
309 #endif	/* FMA_SUPPORT */
310 
311 	/* Check for PEGASUS (This is a special case) */
312 	/* We need to check for dual channel adapter */
313 	if (hba->model_info.vendor_id == PCI_VENDOR_ID_EMULEX &&
314 	    hba->model_info.device_id == PCI_DEVICE_ID_PEGASUS) {
315 		/* Try to determine if this is a DC adapter */
316 		if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
317 			if (MaxRbusSize == REDUCED_SRAM_CFG) {
318 				/* LP9802DC */
319 				for (i = 1; i < emlxs_pci_model_count; i++) {
320 					if (emlxs_pci_model[i].id == LP9802DC) {
321 						bcopy(&emlxs_pci_model[i],
322 						    &hba->model_info,
323 						    sizeof (emlxs_model_t));
324 						break;
325 					}
326 				}
327 			} else if (hba->model_info.id != LP9802) {
328 				/* LP9802 */
329 				for (i = 1; i < emlxs_pci_model_count; i++) {
330 					if (emlxs_pci_model[i].id == LP9802) {
331 						bcopy(&emlxs_pci_model[i],
332 						    &hba->model_info,
333 						    sizeof (emlxs_model_t));
334 						break;
335 					}
336 				}
337 			}
338 		}
339 	}
340 
341 	/*
342 	 * Setup and issue mailbox READ REV command
343 	 */
344 	vpd->opFwRev = 0;
345 	vpd->postKernRev = 0;
346 	vpd->sli1FwRev = 0;
347 	vpd->sli2FwRev = 0;
348 	vpd->sli3FwRev = 0;
349 	vpd->sli4FwRev = 0;
350 
351 	vpd->postKernName[0] = 0;
352 	vpd->opFwName[0] = 0;
353 	vpd->sli1FwName[0] = 0;
354 	vpd->sli2FwName[0] = 0;
355 	vpd->sli3FwName[0] = 0;
356 	vpd->sli4FwName[0] = 0;
357 
358 	vpd->opFwLabel[0] = 0;
359 	vpd->sli1FwLabel[0] = 0;
360 	vpd->sli2FwLabel[0] = 0;
361 	vpd->sli3FwLabel[0] = 0;
362 	vpd->sli4FwLabel[0] = 0;
363 
364 	/* Sanity check */
365 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
366 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
367 		    "Adapter / SLI mode mismatch mask:x%x",
368 		    hba->model_info.sli_mask);
369 
370 		rval = EIO;
371 		goto failed;
372 	}
373 
374 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
375 	emlxs_mb_read_rev(hba, mbq, 0);
376 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
377 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
378 		    "Unable to read rev. Mailbox cmd=%x status=%x",
379 		    mb->mbxCommand, mb->mbxStatus);
380 
381 		rval = EIO;
382 		goto failed;
383 	}
384 
385 	if (mb->un.varRdRev.rr == 0) {
386 		/* Old firmware */
387 		if (read_rev_reset == 0) {
388 			read_rev_reset = 1;
389 
390 			goto reset;
391 		} else {
392 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
393 			    "Outdated firmware detected.");
394 		}
395 
396 		vpd->rBit = 0;
397 	} else {
398 		if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
399 			if (read_rev_reset == 0) {
400 				read_rev_reset = 1;
401 
402 				goto reset;
403 			} else {
404 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
405 				    "Non-operational firmware detected. "
406 				    "type=%x",
407 				    mb->un.varRdRev.un.b.ProgType);
408 			}
409 		}
410 
411 		vpd->rBit = 1;
412 		vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
413 		bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
414 		    16);
415 		vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
416 		bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
417 		    16);
418 
419 		/*
420 		 * Lets try to read the SLI3 version
421 		 * Setup and issue mailbox READ REV(v3) command
422 		 */
423 		EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
424 
425 		/* Reuse mbq from previous mbox */
426 		bzero(mbq, sizeof (MAILBOXQ));
427 
428 		emlxs_mb_read_rev(hba, mbq, 1);
429 
430 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
431 		    MBX_SUCCESS) {
432 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
433 			    "Unable to read rev (v3). Mailbox cmd=%x status=%x",
434 			    mb->mbxCommand, mb->mbxStatus);
435 
436 			rval = EIO;
437 			goto failed;
438 		}
439 
440 		if (mb->un.varRdRev.rf3) {
441 			/*
442 			 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
443 			 * Not needed
444 			 */
445 			vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
446 			bcopy((char *)mb->un.varRdRev.sliFwName2,
447 			    vpd->sli3FwLabel, 16);
448 		}
449 	}
450 
451 	if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
452 		if (vpd->sli2FwRev) {
453 			sli_mode = EMLXS_HBA_SLI2_MODE;
454 			sli_mode_mask = EMLXS_SLI2_MASK;
455 		} else {
456 			sli_mode = 0;
457 			sli_mode_mask = 0;
458 		}
459 	}
460 
461 	else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
462 		if (vpd->sli3FwRev) {
463 			sli_mode = EMLXS_HBA_SLI3_MODE;
464 			sli_mode_mask = EMLXS_SLI3_MASK;
465 		} else {
466 			sli_mode = 0;
467 			sli_mode_mask = 0;
468 		}
469 	}
470 
471 	if (!(hba->model_info.sli_mask & sli_mode_mask)) {
472 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
473 		    "Firmware not available. sli-mode=%d",
474 		    cfg[CFG_SLI_MODE].current);
475 
476 		rval = EIO;
477 		goto failed;
478 	}
479 
480 	/* Save information as VPD data */
481 	vpd->postKernRev = mb->un.varRdRev.postKernRev;
482 	vpd->opFwRev = mb->un.varRdRev.opFwRev;
483 	bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
484 	vpd->biuRev = mb->un.varRdRev.biuRev;
485 	vpd->smRev = mb->un.varRdRev.smRev;
486 	vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
487 	vpd->endecRev = mb->un.varRdRev.endecRev;
488 	vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
489 	vpd->fcphLow = mb->un.varRdRev.fcphLow;
490 	vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
491 	vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
492 
493 	/* Decode FW names */
494 	emlxs_decode_version(vpd->postKernRev, vpd->postKernName,
495 	    sizeof (vpd->postKernName));
496 	emlxs_decode_version(vpd->opFwRev, vpd->opFwName,
497 	    sizeof (vpd->opFwName));
498 	emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName,
499 	    sizeof (vpd->sli1FwName));
500 	emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName,
501 	    sizeof (vpd->sli2FwName));
502 	emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName,
503 	    sizeof (vpd->sli3FwName));
504 	emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName,
505 	    sizeof (vpd->sli4FwName));
506 
507 	/* Decode FW labels */
508 	emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1,
509 	    sizeof (vpd->opFwLabel));
510 	emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1,
511 	    sizeof (vpd->sli1FwLabel));
512 	emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1,
513 	    sizeof (vpd->sli2FwLabel));
514 	emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1,
515 	    sizeof (vpd->sli3FwLabel));
516 	emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1,
517 	    sizeof (vpd->sli4FwLabel));
518 
519 	/* Reuse mbq from previous mbox */
520 	bzero(mbq, sizeof (MAILBOXQ));
521 
522 	key = emlxs_get_key(hba, mbq);
523 
524 	/* Get adapter VPD information */
525 	offset = 0;
526 	bzero(vpd_data, sizeof (vpd_data));
527 	vpd->port_index = (uint32_t)-1;
528 
529 	while (offset < DMP_VPD_SIZE) {
530 		/* Reuse mbq from previous mbox */
531 		bzero(mbq, sizeof (MAILBOXQ));
532 
533 		emlxs_mb_dump_vpd(hba, mbq, offset);
534 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
535 		    MBX_SUCCESS) {
536 			/*
537 			 * Let it go through even if failed.
538 			 * Not all adapter's have VPD info and thus will
539 			 * fail here. This is not a problem
540 			 */
541 
542 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
543 			    "No VPD found. offset=%x status=%x", offset,
544 			    mb->mbxStatus);
545 			break;
546 		} else {
547 			if (mb->un.varDmp.ra == 1) {
548 				uint32_t *lp1, *lp2;
549 				uint32_t bsize;
550 				uint32_t wsize;
551 
552 				/*
553 				 * mb->un.varDmp.word_cnt is actually byte
554 				 * count for the dump reply
555 				 */
556 				bsize = mb->un.varDmp.word_cnt;
557 
558 				/* Stop if no data was received */
559 				if (bsize == 0) {
560 					break;
561 				}
562 
563 				/* Check limit on byte size */
564 				bsize = (bsize >
565 				    (sizeof (vpd_data) - offset)) ?
566 				    (sizeof (vpd_data) - offset) : bsize;
567 
568 				/*
569 				 * Convert size from bytes to words with
570 				 * minimum of 1 word
571 				 */
572 				wsize = (bsize > 4) ? (bsize >> 2) : 1;
573 
574 				/*
575 				 * Transfer data into vpd_data buffer one
576 				 * word at a time
577 				 */
578 				lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
579 				lp2 = (uint32_t *)&vpd_data[offset];
580 
581 				for (i = 0; i < wsize; i++) {
582 					status = *lp1++;
583 					*lp2++ = BE_SWAP32(status);
584 				}
585 
586 				/* Increment total byte count saved */
587 				offset += (wsize << 2);
588 
589 				/*
590 				 * Stop if less than a full transfer was
591 				 * received
592 				 */
593 				if (wsize < DMP_VPD_DUMP_WCOUNT) {
594 					break;
595 				}
596 
597 			} else {
598 				EMLXS_MSGF(EMLXS_CONTEXT,
599 				    &emlxs_init_debug_msg,
600 				    "No VPD acknowledgment. offset=%x",
601 				    offset);
602 				break;
603 			}
604 		}
605 
606 	}
607 
608 	if (vpd_data[0]) {
609 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
610 
611 		/*
612 		 * If there is a VPD part number, and it does not
613 		 * match the current default HBA model info,
614 		 * replace the default data with an entry that
615 		 * does match.
616 		 *
617 		 * After emlxs_parse_vpd model holds the VPD value
618 		 * for V2 and part_num hold the value for PN. These
619 		 * 2 values are NOT necessarily the same.
620 		 */
621 
622 		rval = 0;
623 		if ((vpd->model[0] != 0) &&
624 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
625 
626 			/* First scan for a V2 match */
627 
628 			for (i = 1; i < emlxs_pci_model_count; i++) {
629 				if (strcmp(&vpd->model[0],
630 				    emlxs_pci_model[i].model) == 0) {
631 					bcopy(&emlxs_pci_model[i],
632 					    &hba->model_info,
633 					    sizeof (emlxs_model_t));
634 					rval = 1;
635 					break;
636 				}
637 			}
638 		}
639 
640 		if (!rval && (vpd->part_num[0] != 0) &&
641 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
642 
643 			/* Next scan for a PN match */
644 
645 			for (i = 1; i < emlxs_pci_model_count; i++) {
646 				if (strcmp(&vpd->part_num[0],
647 				    emlxs_pci_model[i].model) == 0) {
648 					bcopy(&emlxs_pci_model[i],
649 					    &hba->model_info,
650 					    sizeof (emlxs_model_t));
651 					break;
652 				}
653 			}
654 		}
655 
656 		/*
657 		 * Now lets update hba->model_info with the real
658 		 * VPD data, if any.
659 		 */
660 
661 		/*
662 		 * Replace the default model description with vpd data
663 		 */
664 		if (vpd->model_desc[0] != 0) {
665 			(void) strncpy(hba->model_info.model_desc,
666 			    vpd->model_desc,
667 			    (sizeof (hba->model_info.model_desc)-1));
668 		}
669 
670 		/* Replace the default model with vpd data */
671 		if (vpd->model[0] != 0) {
672 			(void) strncpy(hba->model_info.model, vpd->model,
673 			    (sizeof (hba->model_info.model)-1));
674 		}
675 
676 		/* Replace the default program types with vpd data */
677 		if (vpd->prog_types[0] != 0) {
678 			emlxs_parse_prog_types(hba, vpd->prog_types);
679 		}
680 	}
681 
682 	/*
683 	 * Since the adapter model may have changed with the vpd data
684 	 * lets double check if adapter is not supported
685 	 */
686 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
687 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
688 		    "Unsupported adapter found.  "
689 		    "Id:%d  Vendor id:0x%x  Device id:0x%x  SSDID:0x%x  "
690 		    "Model:%s", hba->model_info.id, hba->model_info.vendor_id,
691 		    hba->model_info.device_id, hba->model_info.ssdid,
692 		    hba->model_info.model);
693 
694 		rval = EIO;
695 		goto failed;
696 	}
697 
698 	/* Read the adapter's wakeup parms */
699 	(void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
700 	emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
701 	    vpd->boot_version, sizeof (vpd->boot_version));
702 
703 	/* Get fcode version property */
704 	emlxs_get_fcode_version(hba);
705 
706 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
707 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
708 	    vpd->opFwRev, vpd->sli1FwRev);
709 
710 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
711 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
712 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
713 
714 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
715 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
716 
717 	/*
718 	 * If firmware checking is enabled and the adapter model indicates
719 	 * a firmware image, then perform firmware version check
720 	 */
721 	hba->fw_flag = 0;
722 	hba->fw_timer = 0;
723 
724 	if (((fw_check & 0x1) &&
725 	    (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
726 	    hba->model_info.fwid) || ((fw_check & 0x2) &&
727 	    hba->model_info.fwid)) {
728 		emlxs_firmware_t *fw;
729 
730 		/* Find firmware image indicated by adapter model */
731 		fw = NULL;
732 		for (i = 0; i < emlxs_fw_count; i++) {
733 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
734 				fw = &emlxs_fw_table[i];
735 				break;
736 			}
737 		}
738 
739 		/*
740 		 * If the image was found, then verify current firmware
741 		 * versions of adapter
742 		 */
743 		if (fw) {
744 			if (!kern_update &&
745 			    ((fw->kern && (vpd->postKernRev != fw->kern)) ||
746 			    (fw->stub && (vpd->opFwRev != fw->stub)))) {
747 
748 				hba->fw_flag |= FW_UPDATE_NEEDED;
749 
750 			} else if ((fw->kern && (vpd->postKernRev !=
751 			    fw->kern)) ||
752 			    (fw->stub && (vpd->opFwRev != fw->stub)) ||
753 			    (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
754 			    (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
755 			    (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
756 			    (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
757 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
758 				    "Firmware update needed. "
759 				    "Updating. id=%d fw=%d",
760 				    hba->model_info.id, hba->model_info.fwid);
761 
762 #ifdef MODFW_SUPPORT
763 				/*
764 				 * Load the firmware image now
765 				 * If MODFW_SUPPORT is not defined, the
766 				 * firmware image will already be defined
767 				 * in the emlxs_fw_table
768 				 */
769 				emlxs_fw_load(hba, fw);
770 #endif /* MODFW_SUPPORT */
771 
772 				if (fw->image && fw->size) {
773 					uint32_t rc;
774 
775 					rc = emlxs_fw_download(hba,
776 					    (char *)fw->image, fw->size, 0);
777 					if ((rc != FC_SUCCESS) &&
778 					    (rc != EMLXS_REBOOT_REQUIRED)) {
779 						EMLXS_MSGF(EMLXS_CONTEXT,
780 						    &emlxs_init_msg,
781 						    "Firmware update failed.");
782 						hba->fw_flag |=
783 						    FW_UPDATE_NEEDED;
784 					}
785 #ifdef MODFW_SUPPORT
786 					/*
787 					 * Unload the firmware image from
788 					 * kernel memory
789 					 */
790 					emlxs_fw_unload(hba, fw);
791 #endif /* MODFW_SUPPORT */
792 
793 					fw_check = 0;
794 
795 					goto reset;
796 				}
797 
798 				hba->fw_flag |= FW_UPDATE_NEEDED;
799 
800 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
801 				    "Firmware image unavailable.");
802 			} else {
803 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
804 				    "Firmware update not needed.");
805 			}
806 		} else {
807 			/* This should not happen */
808 
809 			/*
810 			 * This means either the adapter database is not
811 			 * correct or a firmware image is missing from the
812 			 * compile
813 			 */
814 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
815 			    "Firmware image unavailable. id=%d fw=%d",
816 			    hba->model_info.id, hba->model_info.fwid);
817 		}
818 	}
819 
820 	/*
821 	 * Add our interrupt routine to kernel's interrupt chain & enable it
822 	 * If MSI is enabled this will cause Solaris to program the MSI address
823 	 * and data registers in PCI config space
824 	 */
825 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
826 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
827 		    "Unable to add interrupt(s).");
828 
829 		rval = EIO;
830 		goto failed;
831 	}
832 
833 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
834 
835 	/* Reuse mbq from previous mbox */
836 	bzero(mbq, sizeof (MAILBOXQ));
837 
838 	(void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
839 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
840 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
841 		    "Unable to configure port. "
842 		    "Mailbox cmd=%x status=%x slimode=%d key=%x",
843 		    mb->mbxCommand, mb->mbxStatus, sli_mode, key);
844 
845 		for (sli_mode--; sli_mode > 0; sli_mode--) {
846 			/* Check if sli_mode is supported by this adapter */
847 			if (hba->model_info.sli_mask &
848 			    EMLXS_SLI_MASK(sli_mode)) {
849 				sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
850 				break;
851 			}
852 		}
853 
854 		if (sli_mode) {
855 			fw_check = 0;
856 
857 			goto reset;
858 		}
859 
860 		hba->flag &= ~FC_SLIM2_MODE;
861 
862 		rval = EIO;
863 		goto failed;
864 	}
865 
866 	/* Check if SLI3 mode was achieved */
867 	if (mb->un.varCfgPort.rMA &&
868 	    (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
869 
870 		if (mb->un.varCfgPort.vpi_max > 1) {
871 			hba->flag |= FC_NPIV_ENABLED;
872 
873 			if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
874 				hba->vpi_max =
875 				    min(mb->un.varCfgPort.vpi_max,
876 				    MAX_VPORTS - 1);
877 			} else {
878 				hba->vpi_max =
879 				    min(mb->un.varCfgPort.vpi_max,
880 				    MAX_VPORTS_LIMITED - 1);
881 			}
882 		}
883 
884 #if (EMLXS_MODREV >= EMLXS_MODREV5)
885 		hba->fca_tran->fca_num_npivports =
886 		    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
887 #endif /* >= EMLXS_MODREV5 */
888 
889 		if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
890 			hba->flag |= FC_HBQ_ENABLED;
891 		}
892 
893 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
894 		    "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
895 	} else {
896 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
897 		    "SLI2 mode: flag=%x", hba->flag);
898 		sli_mode = EMLXS_HBA_SLI2_MODE;
899 		sli_mode_mask = EMLXS_SLI2_MASK;
900 		hba->sli_mode = sli_mode;
901 #if (EMLXS_MODREV >= EMLXS_MODREV5)
902 		hba->fca_tran->fca_num_npivports = 0;
903 #endif /* >= EMLXS_MODREV5 */
904 
905 	}
906 
907 	/* Get and save the current firmware version (based on sli_mode) */
908 	emlxs_decode_firmware_rev(hba, vpd);
909 
910 	emlxs_pcix_mxr_update(hba, 0);
911 
912 	/* Reuse mbq from previous mbox */
913 	bzero(mbq, sizeof (MAILBOXQ));
914 
915 	emlxs_mb_read_config(hba, mbq);
916 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
917 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
918 		    "Unable to read configuration.  Mailbox cmd=%x status=%x",
919 		    mb->mbxCommand, mb->mbxStatus);
920 
921 		rval = EIO;
922 		goto failed;
923 	}
924 
925 	/* Save the link speed capabilities */
926 	vpd->link_speed = (uint16_t)mb->un.varRdConfig.lmt;
927 	emlxs_process_link_speed(hba);
928 
929 	/* Set the max node count */
930 	if (cfg[CFG_NUM_NODES].current > 0) {
931 		hba->max_nodes =
932 		    min(cfg[CFG_NUM_NODES].current,
933 		    mb->un.varRdConfig.max_rpi);
934 	} else {
935 		hba->max_nodes = mb->un.varRdConfig.max_rpi;
936 	}
937 
938 	/* Set the io throttle */
939 	hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
940 
941 	/* Set max_iotag */
942 	if (cfg[CFG_NUM_IOTAGS].current) {
943 		hba->max_iotag = (uint16_t)cfg[CFG_NUM_IOTAGS].current;
944 	} else {
945 		hba->max_iotag = mb->un.varRdConfig.max_xri;
946 	}
947 
948 	/* Set out-of-range iotag base */
949 	hba->fc_oor_iotag = hba->max_iotag;
950 
951 	/*
952 	 * Allocate some memory for buffers
953 	 */
954 	if (emlxs_mem_alloc_buffer(hba) == 0) {
955 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
956 		    "Unable to allocate memory buffers.");
957 
958 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
959 		return (ENOMEM);
960 	}
961 
962 	/*
963 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
964 	 */
965 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) ||
966 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0)) {
967 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
968 		    "Unable to allocate diag buffers.");
969 
970 		rval = ENOMEM;
971 		goto failed;
972 	}
973 
974 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
975 	    MEM_ELSBUF_SIZE);
976 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
977 	    DDI_DMA_SYNC_FORDEV);
978 
979 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
980 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
981 	    DDI_DMA_SYNC_FORDEV);
982 
983 	/* Reuse mbq from previous mbox */
984 	bzero(mbq, sizeof (MAILBOXQ));
985 
986 	(void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);
987 
988 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
989 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
990 		    "Unable to run BIU diag.  Mailbox cmd=%x status=%x",
991 		    mb->mbxCommand, mb->mbxStatus);
992 
993 		rval = EIO;
994 		goto failed;
995 	}
996 
997 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
998 	    DDI_DMA_SYNC_FORKERNEL);
999 
1000 #ifdef FMA_SUPPORT
1001 	if (mp->dma_handle) {
1002 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
1003 		    != DDI_FM_OK) {
1004 			EMLXS_MSGF(EMLXS_CONTEXT,
1005 			    &emlxs_invalid_dma_handle_msg,
1006 			    "sli3_online: hdl=%p",
1007 			    mp->dma_handle);
1008 			rval = EIO;
1009 			goto failed;
1010 		}
1011 	}
1012 
1013 	if (mp1->dma_handle) {
1014 		if (emlxs_fm_check_dma_handle(hba, mp1->dma_handle)
1015 		    != DDI_FM_OK) {
1016 			EMLXS_MSGF(EMLXS_CONTEXT,
1017 			    &emlxs_invalid_dma_handle_msg,
1018 			    "sli3_online: hdl=%p",
1019 			    mp1->dma_handle);
1020 			rval = EIO;
1021 			goto failed;
1022 		}
1023 	}
1024 #endif  /* FMA_SUPPORT */
1025 
1026 	outptr = mp->virt;
1027 	inptr = mp1->virt;
1028 
1029 	for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
1030 		if (*outptr++ != *inptr++) {
1031 			outptr--;
1032 			inptr--;
1033 
1034 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1035 			    "BIU diagnostic failed. "
1036 			    "offset %x value %x should be %x.",
1037 			    i, (uint32_t)*inptr, (uint32_t)*outptr);
1038 
1039 			rval = EIO;
1040 			goto failed;
1041 		}
1042 	}
1043 
1044 	/* Free the buffers since we were polling */
1045 	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1046 	mp = NULL;
1047 	emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
1048 	mp1 = NULL;
1049 
1050 	hba->channel_fcp = FC_FCP_RING;
1051 	hba->channel_els = FC_ELS_RING;
1052 	hba->channel_ip = FC_IP_RING;
1053 	hba->channel_ct = FC_CT_RING;
1054 	hba->sli.sli3.ring_count = MAX_RINGS;
1055 
1056 	hba->channel_tx_count = 0;
1057 	hba->io_count = 0;
1058 	hba->fc_iotag = 1;
1059 
1060 	for (i = 0; i < hba->chan_count; i++) {
1061 		cp = &hba->chan[i];
1062 
1063 		/* 1 to 1 mapping between ring and channel */
1064 		cp->iopath = (void *)&hba->sli.sli3.ring[i];
1065 
1066 		cp->hba = hba;
1067 		cp->channelno = i;
1068 	}
1069 
1070 	/*
1071 	 * Setup and issue mailbox CONFIGURE RING command
1072 	 */
1073 	for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
1074 		/*
1075 		 * Initialize cmd/rsp ring pointers
1076 		 */
1077 		rp = &hba->sli.sli3.ring[i];
1078 
1079 		/* 1 to 1 mapping between ring and channel */
1080 		rp->channelp = &hba->chan[i];
1081 
1082 		rp->hba = hba;
1083 		rp->ringno = (uint8_t)i;
1084 
1085 		rp->fc_cmdidx = 0;
1086 		rp->fc_rspidx = 0;
1087 		EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);
1088 
1089 		/* Reuse mbq from previous mbox */
1090 		bzero(mbq, sizeof (MAILBOXQ));
1091 
1092 		emlxs_mb_config_ring(hba, i, mbq);
1093 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1094 		    MBX_SUCCESS) {
1095 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1096 			    "Unable to configure ring. "
1097 			    "Mailbox cmd=%x status=%x",
1098 			    mb->mbxCommand, mb->mbxStatus);
1099 
1100 			rval = EIO;
1101 			goto failed;
1102 		}
1103 	}
1104 
1105 	/*
1106 	 * Setup link timers
1107 	 */
1108 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1109 
1110 	/* Reuse mbq from previous mbox */
1111 	bzero(mbq, sizeof (MAILBOXQ));
1112 
1113 	emlxs_mb_config_link(hba, mbq);
1114 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1115 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1116 		    "Unable to configure link. Mailbox cmd=%x status=%x",
1117 		    mb->mbxCommand, mb->mbxStatus);
1118 
1119 		rval = EIO;
1120 		goto failed;
1121 	}
1122 
1123 #ifdef MAX_RRDY_SUPPORT
1124 	/* Set MAX_RRDY if one is provided */
1125 	if (cfg[CFG_MAX_RRDY].current) {
1126 
1127 		/* Reuse mbq from previous mbox */
1128 		bzero(mbq, sizeof (MAILBOXQ));
1129 
1130 		emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
1131 		    cfg[CFG_MAX_RRDY].current);
1132 
1133 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1134 		    MBX_SUCCESS) {
1135 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1136 			    "MAX_RRDY: Unable to set.  status=%x " \
1137 			    "value=%d",
1138 			    mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
1139 		} else {
1140 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1141 			    "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
1142 		}
1143 	}
1144 #endif /* MAX_RRDY_SUPPORT */
1145 
1146 	/* Reuse mbq from previous mbox */
1147 	bzero(mbq, sizeof (MAILBOXQ));
1148 
1149 	/*
1150 	 * We need to get login parameters for NID
1151 	 */
1152 	(void) emlxs_mb_read_sparam(hba, mbq);
1153 	mp = (MATCHMAP *)mbq->bp;
1154 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1155 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1156 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1157 		    mb->mbxCommand, mb->mbxStatus);
1158 
1159 		rval = EIO;
1160 		goto failed;
1161 	}
1162 
1163 	/* Free the buffer since we were polling */
1164 	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1165 	mp = NULL;
1166 
1167 	/* If no serial number in VPD data, then use the WWPN */
1168 	if (vpd->serial_num[0] == 0) {
1169 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1170 		for (i = 0; i < 12; i++) {
1171 			status = *outptr++;
1172 			j = ((status & 0xf0) >> 4);
1173 			if (j <= 9) {
1174 				vpd->serial_num[i] =
1175 				    (char)((uint8_t)'0' + (uint8_t)j);
1176 			} else {
1177 				vpd->serial_num[i] =
1178 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1179 			}
1180 
1181 			i++;
1182 			j = (status & 0xf);
1183 			if (j <= 9) {
1184 				vpd->serial_num[i] =
1185 				    (char)((uint8_t)'0' + (uint8_t)j);
1186 			} else {
1187 				vpd->serial_num[i] =
1188 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1189 			}
1190 		}
1191 
1192 		/*
1193 		 * Set port number and port index to zero
1194 		 * The WWN's are unique to each port and therefore port_num
1195 		 * must equal zero. This effects the hba_fru_details structure
1196 		 * in fca_bind_port()
1197 		 */
1198 		vpd->port_num[0] = 0;
1199 		vpd->port_index = 0;
1200 	}
1201 
1202 	/*
1203 	 * Make first attempt to set a port index
1204 	 * Check if this is a multifunction adapter
1205 	 */
1206 	if ((vpd->port_index == (uint32_t)-1) &&
1207 	    (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1208 		char *buffer;
1209 		int32_t i;
1210 
1211 		/*
1212 		 * The port address looks like this:
1213 		 * 1	- for port index 0
1214 		 * 1,1	- for port index 1
1215 		 * 1,2	- for port index 2
1216 		 */
1217 		buffer = ddi_get_name_addr(hba->dip);
1218 
1219 		if (buffer) {
1220 			vpd->port_index = 0;
1221 
1222 			/* Reverse scan for a comma */
1223 			for (i = strlen(buffer) - 1; i > 0; i--) {
1224 				if (buffer[i] == ',') {
1225 					/* Comma found - set index now */
1226 					vpd->port_index =
1227 					    emlxs_strtol(&buffer[i + 1], 10);
1228 					break;
1229 				}
1230 			}
1231 		}
1232 	}
1233 
1234 	/* Make final attempt to set a port index */
1235 	if (vpd->port_index == (uint32_t)-1) {
1236 		dev_info_t *p_dip;
1237 		dev_info_t *c_dip;
1238 
1239 		p_dip = ddi_get_parent(hba->dip);
1240 		c_dip = ddi_get_child(p_dip);
1241 
1242 		vpd->port_index = 0;
1243 		while (c_dip && (hba->dip != c_dip)) {
1244 			c_dip = ddi_get_next_sibling(c_dip);
1245 			vpd->port_index++;
1246 		}
1247 	}
1248 
1249 	if (vpd->port_num[0] == 0) {
1250 		if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1251 			(void) snprintf(vpd->port_num,
1252 			    (sizeof (vpd->port_num)-1),
1253 			    "%d", vpd->port_index);
1254 		}
1255 	}
1256 
1257 	if (vpd->id[0] == 0) {
1258 		(void) strncpy(vpd->id, hba->model_info.model_desc,
1259 		    (sizeof (vpd->id)-1));
1260 	}
1261 
1262 	if (vpd->manufacturer[0] == 0) {
1263 		(void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1264 		    (sizeof (vpd->manufacturer)-1));
1265 	}
1266 
1267 	if (vpd->part_num[0] == 0) {
1268 		(void) strncpy(vpd->part_num, hba->model_info.model,
1269 		    (sizeof (vpd->part_num)-1));
1270 	}
1271 
1272 	if (vpd->model_desc[0] == 0) {
1273 		(void) strncpy(vpd->model_desc, hba->model_info.model_desc,
1274 		    (sizeof (vpd->model_desc)-1));
1275 	}
1276 
1277 	if (vpd->model[0] == 0) {
1278 		(void) strncpy(vpd->model, hba->model_info.model,
1279 		    (sizeof (vpd->model)-1));
1280 	}
1281 
1282 	if (vpd->prog_types[0] == 0) {
1283 		emlxs_build_prog_types(hba, vpd);
1284 	}
1285 
1286 	/* Create the symbolic names */
1287 	(void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1288 	    "%s %s FV%s DV%s %s",
1289 	    hba->model_info.manufacturer, hba->model_info.model,
1290 	    hba->vpd.fw_version, emlxs_version,
1291 	    (char *)utsname.nodename);
1292 
1293 	(void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1294 	    "%s PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1295 	    hba->model_info.manufacturer,
1296 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1297 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1298 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1299 
1300 	if (cfg[CFG_NETWORK_ON].current) {
1301 		if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1302 		    (hba->sparam.portName.IEEEextMsn != 0) ||
1303 		    (hba->sparam.portName.IEEEextLsb != 0)) {
1304 
1305 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1306 			    "WWPN doesn't conform to IP profile: "
1307 			    "nameType=%x. Disabling networking.",
1308 			    hba->sparam.portName.nameType);
1309 
1310 			cfg[CFG_NETWORK_ON].current = 0;
1311 		}
1312 	}
1313 
1314 	if (cfg[CFG_NETWORK_ON].current) {
1315 		/* Reuse mbq from previous mbox */
1316 		bzero(mbq, sizeof (MAILBOXQ));
1317 
1318 		/* Issue CONFIG FARP */
1319 		emlxs_mb_config_farp(hba, mbq);
1320 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1321 		    MBX_SUCCESS) {
1322 			/*
1323 			 * Let it go through even if failed.
1324 			 */
1325 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1326 			    "Unable to configure FARP. "
1327 			    "Mailbox cmd=%x status=%x",
1328 			    mb->mbxCommand, mb->mbxStatus);
1329 		}
1330 	}
1331 #ifdef MSI_SUPPORT
1332 	/* Configure MSI map if required */
1333 	if (hba->intr_count > 1) {
1334 
1335 		if (hba->intr_type == DDI_INTR_TYPE_MSIX) {
1336 			/* always start from 0 */
1337 			hba->last_msiid = 0;
1338 		}
1339 
1340 		/* Reuse mbq from previous mbox */
1341 		bzero(mbq, sizeof (MAILBOXQ));
1342 
1343 		emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);
1344 
1345 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1346 		    MBX_SUCCESS) {
1347 			goto msi_configured;
1348 		}
1349 
1350 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1351 		    "Unable to config MSIX.  Mailbox cmd=0x%x status=0x%x",
1352 		    mb->mbxCommand, mb->mbxStatus);
1353 
1354 		/* Reuse mbq from previous mbox */
1355 		bzero(mbq, sizeof (MAILBOXQ));
1356 
1357 		emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);
1358 
1359 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1360 		    MBX_SUCCESS) {
1361 			goto msi_configured;
1362 		}
1363 
1364 
1365 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1366 		    "Unable to config MSI.  Mailbox cmd=0x%x status=0x%x",
1367 		    mb->mbxCommand, mb->mbxStatus);
1368 
1369 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1370 		    "Attempting single interrupt mode...");
1371 
1372 		/* First cleanup old interrupts */
1373 		(void) emlxs_msi_remove(hba);
1374 		(void) emlxs_msi_uninit(hba);
1375 
1376 		status = emlxs_msi_init(hba, 1);
1377 
1378 		if (status != DDI_SUCCESS) {
1379 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1380 			    "Unable to initialize interrupt. status=%d",
1381 			    status);
1382 
1383 			rval = EIO;
1384 			goto failed;
1385 		}
1386 
1387 		/*
1388 		 * Reset adapter - The adapter needs to be reset because
1389 		 * the bus cannot handle the MSI change without handshaking
1390 		 * with the adapter again
1391 		 */
1392 
1393 		(void) emlxs_mem_free_buffer(hba);
1394 		fw_check = 0;
1395 		goto reset;
1396 	}
1397 
1398 msi_configured:
1399 
1400 
1401 	if ((hba->intr_count >= 1) &&
1402 	    (hba->sli_mode == EMLXS_HBA_SLI3_MODE)) {
1403 		/* intr_count is a sequence of msi id */
1404 		/* Setup msi2chan[msi_id] */
1405 		for (i = 0; i < hba->intr_count; i ++) {
1406 			hba->msi2chan[i] = i;
1407 			if (i >= hba->chan_count)
1408 				hba->msi2chan[i] = (i - hba->chan_count);
1409 		}
1410 	}
1411 #endif /* MSI_SUPPORT */
1412 
1413 	/*
1414 	 * We always disable the firmware traffic cop feature
1415 	 */
1416 	if (emlxs_disable_traffic_cop) {
1417 		/* Reuse mbq from previous mbox */
1418 		bzero(mbq, sizeof (MAILBOXQ));
1419 
1420 		emlxs_disable_tc(hba, mbq);
1421 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1422 		    MBX_SUCCESS) {
1423 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1424 			    "Unable to disable traffic cop. "
1425 			    "Mailbox cmd=%x status=%x",
1426 			    mb->mbxCommand, mb->mbxStatus);
1427 
1428 			rval = EIO;
1429 			goto failed;
1430 		}
1431 	}
1432 
1433 
1434 	/* Reuse mbq from previous mbox */
1435 	bzero(mbq, sizeof (MAILBOXQ));
1436 
1437 	/* Register for async events */
1438 	emlxs_mb_async_event(hba, mbq);
1439 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1440 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1441 		    "Async events disabled. Mailbox status=%x",
1442 		    mb->mbxStatus);
1443 	} else {
1444 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1445 		    "Async events enabled.");
1446 		hba->flag |= FC_ASYNC_EVENTS;
1447 	}
1448 
1449 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1450 
1451 	emlxs_sli3_enable_intr(hba);
1452 
1453 	if (hba->flag & FC_HBQ_ENABLED) {
1454 		if (port->flag & EMLXS_TGT_ENABLED) {
1455 			if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1456 				EMLXS_MSGF(EMLXS_CONTEXT,
1457 				    &emlxs_init_failed_msg,
1458 				    "Unable to setup FCT HBQ.");
1459 
1460 				rval = ENOMEM;
1461 
1462 #ifdef SFCT_SUPPORT
1463 				/* Check if we can fall back to just */
1464 				/* initiator mode */
1465 				if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
1466 				    (port->flag & EMLXS_INI_ENABLED) &&
1467 				    (cfg[CFG_DTM_ENABLE].current == 1) &&
1468 				    (cfg[CFG_TARGET_MODE].current == 0)) {
1469 
1470 					cfg[CFG_DTM_ENABLE].current = 0;
1471 
1472 					EMLXS_MSGF(EMLXS_CONTEXT,
1473 					    &emlxs_init_failed_msg,
1474 					    "Disabling dynamic target mode. "
1475 					    "Enabling initiator mode only.");
1476 
1477 					/* This will trigger the driver to */
1478 					/* reattach */
1479 					rval = EAGAIN;
1480 				}
1481 #endif /* SFCT_SUPPORT */
1482 				goto failed;
1483 			}
1484 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1485 			    "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1486 		}
1487 
1488 		if (cfg[CFG_NETWORK_ON].current) {
1489 			if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1490 				EMLXS_MSGF(EMLXS_CONTEXT,
1491 				    &emlxs_init_failed_msg,
1492 				    "Unable to setup IP HBQ.");
1493 
1494 				rval = ENOMEM;
1495 				goto failed;
1496 			}
1497 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1498 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1499 		}
1500 
1501 		if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1502 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1503 			    "Unable to setup ELS HBQ.");
1504 			rval = ENOMEM;
1505 			goto failed;
1506 		}
1507 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1508 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1509 
1510 		if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1511 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1512 			    "Unable to setup CT HBQ.");
1513 
1514 			rval = ENOMEM;
1515 			goto failed;
1516 		}
1517 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1518 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1519 	} else {
1520 		if (port->flag & EMLXS_TGT_ENABLED) {
1521 			/* Post the FCT unsol buffers */
1522 			rp = &hba->sli.sli3.ring[FC_FCT_RING];
1523 			for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1524 				(void) emlxs_post_buffer(hba, rp, 2);
1525 			}
1526 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1527 			    "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1528 		}
1529 
1530 		if (cfg[CFG_NETWORK_ON].current) {
1531 			/* Post the IP unsol buffers */
1532 			rp = &hba->sli.sli3.ring[FC_IP_RING];
1533 			for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1534 				(void) emlxs_post_buffer(hba, rp, 2);
1535 			}
1536 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1537 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1538 		}
1539 
1540 		/* Post the ELS unsol buffers */
1541 		rp = &hba->sli.sli3.ring[FC_ELS_RING];
1542 		for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1543 			(void) emlxs_post_buffer(hba, rp, 2);
1544 		}
1545 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1546 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1547 
1548 
1549 		/* Post the CT unsol buffers */
1550 		rp = &hba->sli.sli3.ring[FC_CT_RING];
1551 		for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1552 			(void) emlxs_post_buffer(hba, rp, 2);
1553 		}
1554 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1555 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1556 	}
1557 
1558 	(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1559 
1560 	/* Check persist-linkdown */
1561 	if (cfg[CFG_PERSIST_LINKDOWN].current) {
1562 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1563 		return (0);
1564 	}
1565 
1566 #ifdef SFCT_SUPPORT
1567 	if ((port->mode == MODE_TARGET) &&
1568 	    !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1569 		emlxs_enable_latt(hba);
1570 		return (0);
1571 	}
1572 #endif /* SFCT_SUPPORT */
1573 
1574 	/*
1575 	 * Setup and issue mailbox INITIALIZE LINK command
1576 	 * At this point, the interrupt will be generated by the HW
1577 	 */
1578 	mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX);
1579 	if (mbq == NULL) {
1580 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1581 		    "Unable to allocate mailbox buffer.");
1582 
1583 		rval = EIO;
1584 		goto failed;
1585 	}
1586 	mb = (MAILBOX *)mbq;
1587 
1588 	emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
1589 	    cfg[CFG_LINK_SPEED].current);
1590 
1591 	rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1592 	if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1593 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1594 		    "Unable to initialize link. " \
1595 		    "Mailbox cmd=%x status=%x",
1596 		    mb->mbxCommand, mb->mbxStatus);
1597 
1598 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
1599 		mbq = NULL;
1600 		rval = EIO;
1601 		goto failed;
1602 	}
1603 
1604 	/*
1605 	 * Enable link attention interrupt
1606 	 */
1607 	emlxs_enable_latt(hba);
1608 
1609 	/* Wait for link to come up */
1610 	i = cfg[CFG_LINKUP_DELAY].current;
1611 	while (i && (hba->state < FC_LINK_UP)) {
1612 		/* Check for hardware error */
1613 		if (hba->state == FC_ERROR) {
1614 			EMLXS_MSGF(EMLXS_CONTEXT,
1615 			    &emlxs_init_failed_msg,
1616 			    "Adapter error.");
1617 
1618 			mbq = NULL;
1619 			rval = EIO;
1620 			goto failed;
1621 		}
1622 
1623 		BUSYWAIT_MS(1000);
1624 		i--;
1625 	}
1626 
1627 	/*
1628 	 * The leadvile driver will now handle the FLOGI at the driver level
1629 	 */
1630 
1631 	return (0);
1632 
1633 failed:
1634 
1635 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1636 
1637 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1638 		(void) EMLXS_INTR_REMOVE(hba);
1639 	}
1640 
1641 	if (mp) {
1642 		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1643 		mp = NULL;
1644 	}
1645 
1646 	if (mp1) {
1647 		emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
1648 		mp1 = NULL;
1649 	}
1650 
1651 	(void) emlxs_mem_free_buffer(hba);
1652 
1653 	if (mbq) {
1654 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1655 		mbq = NULL;
1656 		mb = NULL;
1657 	}
1658 
1659 	if (rval == 0) {
1660 		rval = EIO;
1661 	}
1662 
1663 	return (rval);
1664 
1665 } /* emlxs_sli3_online() */
1666 
1667 
1668 /*ARGSUSED*/
1669 static void
emlxs_sli3_offline(emlxs_hba_t * hba,uint32_t reset_requested)1670 emlxs_sli3_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1671 {
1672 	/* Reverse emlxs_sli3_online */
1673 
1674 	/* Kill the adapter */
1675 	emlxs_sli3_hba_kill(hba);
1676 
1677 	/* Free driver shared memory */
1678 	(void) emlxs_mem_free_buffer(hba);
1679 
1680 } /* emlxs_sli3_offline() */
1681 
1682 
/*
 * Map the adapter's hardware resources for SLI-3 operation:
 *
 *   1. Map the SLIM and CSR register spaces (plus flash/core/CSR
 *      spaces on SBUS adapters) via ddi_regs_map_setup().
 *   2. Allocate and zero the host-memory SLIM2 DMA buffer.
 *   3. Derive per-register pointers (HA/CA/HS/HC/BC) from the
 *      mapped CSR base.
 *
 * Each mapping is guarded by a zero-handle check, so a partially
 * mapped adapter can be re-entered safely. On any failure all
 * mappings made so far are undone via emlxs_sli3_unmap_hdw().
 *
 * Returns 0 on success, ENOMEM on any mapping/allocation failure.
 */
static int
emlxs_sli3_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {

		/* SBUS adapters expose five separate register spaces */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup SLIM failed. "
				    "status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup DFLY CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup Fcode Flash "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CORE "
				    "failed. status=%x", status);
				goto failed;
			}
		}

		if (hba->sli.sli3.sbus_csr_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
	} else {	/* ****** PCI ****** */

		/* PCI adapters need only the SLIM and CSR BARs */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup SLIM failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli3.slim_addr, &dev_attr,
				    &hba->sli.sli3.slim_acc_handle);
				goto failed;
			}
		}

		/*
		 * Map in control registers, using memory-mapped version of
		 * the registers rather than the I/O space-mapped registers.
		 */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup CSR failed. status=%x",
				    status);
				goto failed;
			}
		}
	}

	/* Allocate the host-memory SLIM2 region (page-aligned, one cookie) */
	if (hba->sli.sli3.slim2.virt == 0) {
		MBUF_INFO	*buf_info;
		MBUF_INFO	bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = SLI_SLIM2_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli3.slim2.virt = buf_info->virt;
		hba->sli.sli3.slim2.phys = buf_info->phys;
		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
	}

	/* offset from beginning of register space */
	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HA_REG_OFFSET));
	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * CA_REG_OFFSET));
	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HS_REG_OFFSET));
	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HC_REG_OFFSET));
	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * BC_REG_OFFSET));

	if (hba->bus_type == SBUS_FC) {
		/* offset from beginning of register space */
		/* for TITAN registers */
		hba->sli.sli3.shc_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
		hba->sli.sli3.shs_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
		hba->sli.sli3.shu_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
	}
	hba->chan_count = MAX_RINGS;

	return (0);

failed:

	/* Undo any mappings made before the failure */
	emlxs_sli3_unmap_hdw(hba);
	return (ENOMEM);

} /* emlxs_sli3_map_hdw() */
1855 
1856 
1857 static void
emlxs_sli3_unmap_hdw(emlxs_hba_t * hba)1858 emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
1859 {
1860 	MBUF_INFO	bufinfo;
1861 	MBUF_INFO	*buf_info = &bufinfo;
1862 
1863 	if (hba->sli.sli3.csr_acc_handle) {
1864 		ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
1865 		hba->sli.sli3.csr_acc_handle = 0;
1866 	}
1867 
1868 	if (hba->sli.sli3.slim_acc_handle) {
1869 		ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
1870 		hba->sli.sli3.slim_acc_handle = 0;
1871 	}
1872 
1873 	if (hba->sli.sli3.sbus_flash_acc_handle) {
1874 		ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
1875 		hba->sli.sli3.sbus_flash_acc_handle = 0;
1876 	}
1877 
1878 	if (hba->sli.sli3.sbus_core_acc_handle) {
1879 		ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
1880 		hba->sli.sli3.sbus_core_acc_handle = 0;
1881 	}
1882 
1883 	if (hba->sli.sli3.sbus_csr_handle) {
1884 		ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
1885 		hba->sli.sli3.sbus_csr_handle = 0;
1886 	}
1887 
1888 	if (hba->sli.sli3.slim2.virt) {
1889 		bzero(buf_info, sizeof (MBUF_INFO));
1890 
1891 		if (hba->sli.sli3.slim2.phys) {
1892 			buf_info->phys = hba->sli.sli3.slim2.phys;
1893 			buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
1894 			buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
1895 			buf_info->flags = FC_MBUF_DMA;
1896 		}
1897 
1898 		buf_info->virt = hba->sli.sli3.slim2.virt;
1899 		buf_info->size = hba->sli.sli3.slim2.size;
1900 		emlxs_mem_free(hba, buf_info);
1901 
1902 		hba->sli.sli3.slim2.virt = NULL;
1903 	}
1904 
1905 
1906 	return;
1907 
1908 } /* emlxs_sli3_unmap_hdw() */
1909 
1910 
/*
 * Perform one-time SLI-3 initialization after hardware mapping:
 * reset the adapter, set up the channel/ring layout, build the
 * unsolicited-frame ring mask tables, initialize the per-vport
 * objects, and choose a default max_nodes value.
 *
 * Returns 0 on success, 1 on failure (reset failed or ring mask
 * table overflow).
 */
static uint32_t
emlxs_sli3_hba_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	emlxs_config_t *cfg;
	uint16_t i;
	VPIobj_t *vpip;

	cfg = &CFG;
	i = 0;	/* running count of ring mask entries used */

	/* Restart the adapter */
	if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	/* Fixed 1:1 mapping of traffic classes to SLI-3 rings */
	hba->channel_fcp = FC_FCP_RING;
	hba->channel_els = FC_ELS_RING;
	hba->channel_ip = FC_IP_RING;
	hba->channel_ct = FC_CT_RING;
	hba->chan_count = MAX_RINGS;
	hba->sli.sli3.ring_count = MAX_RINGS;

	/*
	 * WARNING: There is a max of 6 ring masks allowed
	 */
	/* RING 0 - FCP */
	/* Only needed in target mode, to steer unsolicited FCP cmds */
	if (port->flag & EMLXS_TGT_ENABLED) {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
		hba->sli.sli3.ring_rmask[i] = 0;
		hba->sli.sli3.ring_tval[i] = FC_TYPE_SCSI_FCP;
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
	hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;

	/* RING 1 - IP */
	/* Only needed when IP-over-FC networking is configured on */
	if (cfg[CFG_NETWORK_ON].current) {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
		hba->sli.sli3.ring_rmask[i] = 0xFF;
		hba->sli.sli3.ring_tval[i] = FC_TYPE_IS8802_SNAP; /* LLC/SNAP */
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
	hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;

	/* RING 2 - ELS */
	hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_TYPE_EXTENDED_LS;	/* ELS */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
	hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;

	/* RING 3 - CT */
	hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL;	/* CT request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_TYPE_FC_SERVICES;	/* CT */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
	hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;

	/* Sanity-check the firmware limit on ring mask entries */
	if (i > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "hba_init: Too many ring masks defined. cnt=%d", i);
		return (1);
	}

	/* Initialize all the port objects */
	hba->vpi_max = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;

		vpip = &vport->VPIobj;
		vpip->index = i;
		vpip->VPI = i;
		vpip->port = vport;
		vpip->state = VPI_STATE_OFFLINE;
		vport->vpip = vpip;
	}

	/*
	 * Initialize the max_node count to a default value if needed
	 * This determines how many node objects we preallocate in the pool
	 * The actual max_nodes will be set later based on adapter info
	 */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
			hba->max_nodes = 4096;
		} else {
			hba->max_nodes = 512;
		}
	}

	return (0);

} /* emlxs_sli3_hba_init() */
2025 
2026 
2027 /*
2028  * 0: quiesce indicates the call is not from quiesce routine.
2029  * 1: quiesce indicates the call is from quiesce routine.
2030  */
/*
 * emlxs_sli3_hba_reset
 *
 * Reset (restart == 0) or restart (restart != 0) the SLI3 adapter and
 * re-initialize the driver's per-HBA, per-ring and per-port soft state
 * to post-reset defaults.
 *
 *	restart   - nonzero: write an MBX_RESTART command into SLIM1
 *	            before asserting HC_INITFF, and require both
 *	            HS_FFRDY and HS_MBRDY before declaring success;
 *	            zero: a plain reset, only HS_MBRDY is required.
 *	skip_post - nonzero: tell the firmware to skip POST on restart
 *	            (only meaningful after emlxs_sli3_online completes).
 *	quiesce   - selects which "kill" routine is used to stop the
 *	            adapter before the reset (see comment above).
 *
 * Returns 0 on success; 1 if reset is disabled by config, the adapter
 * reports HS_FFERM, the 30-second ready wait times out, or (with FMA)
 * an access handle is found invalid.
 */
static uint32_t
emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
    uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX swpmb;
	MAILBOX *mb;
	uint32_t *word0;
	uint16_t cfg_value;
	uint32_t status = 0;
	uint32_t status1;
	uint32_t status2;
	uint32_t i;
	uint32_t ready;
	emlxs_port_t *vport;
	RING *rp;
	emlxs_config_t *cfg = &CFG;

	/* Honor the cfg knob that disables adapter resets entirely */
	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	/* Kill the adapter first */
	if (quiesce == 0) {
		emlxs_sli3_hba_kill(hba);
	} else {
		emlxs_sli3_hba_kill4quiesce(hba);
	}

	/* Select the ready mask the status register must satisfy below */
	if (restart) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Restarting.");
		EMLXS_STATE_CHANGE(hba, FC_INIT_START);

		ready = (HS_FFRDY | HS_MBRDY);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Resetting.");
		EMLXS_STATE_CHANGE(hba, FC_WARM_START);

		ready = HS_MBRDY;
	}

	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);

	/* swpmb is a host-side staging copy written to SLIM1 word 0 */
	mb = FC_SLIM1_MAILBOX(hba);
	word0 = (uint32_t *)&swpmb;

reset:

	i = 0;

	/* Save reset time */
	HBASTATS.ResetTime = hba->timer_tics;

	if (restart) {
		/* First put restart command in mailbox */
		*word0 = 0;
		swpmb.mbxCommand = MBX_RESTART;
		swpmb.mbxHc = 1;
		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), *word0);

		/* Only skip post after emlxs_sli3_online is completed */
		if (skip_post) {
			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
			    1);
		} else {
			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
			    0);
		}

	}

	/*
	 * Turn off SERR, PERR in PCI cmd register
	 */
	cfg_value = ddi_get16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));

	ddi_put16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));

	/* Assert the chip reset (INITFF) in the Host Control register */
	hba->sli.sli3.hc_copy = HC_INITFF;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);

	/* Wait 1 msec before restoring PCI config */
	BUSYWAIT_MS(1);

	/* Restore PCI cmd register */
	ddi_put16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
	    (uint16_t)cfg_value);

	/* Wait 3 seconds before checking */
	BUSYWAIT_MS(3000);
	i += 3;

	/* Wait for reset completion (i counts elapsed seconds, max 30) */
	while (i < 30) {
		/* Check status register to see what current state is */
		status = READ_CSR_REG(hba, FC_HS_REG(hba));

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* Firmware error; fetch diagnostic words from SLIM */
			status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
			    hba->sli.sli3.slim_addr + 0xa8));
			status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
			    hba->sli.sli3.slim_addr + 0xac));

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
			    status, status1, status2);

			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			return (1);
		}

		if ((status & ready) == ready) {
			/* Reset Done !! */
			goto done;
		}

		/*
		 * Check every 1 second for 15 seconds, then reset board
		 * again (w/post), then check every 1 second for 15 * seconds.
		 */
		BUSYWAIT_MS(1000);
		i++;

		/* Reset again (w/post) at 15 seconds */
		if (i == 15) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Reset failed. Retrying...");

			goto reset;
		}
	}

#ifdef FMA_SUPPORT
reset_fail:
#endif  /* FMA_SUPPORT */

	/* Timeout occurred */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
	    "Timeout: status=0x%x", status);
	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	/* Log a dump event */
	emlxs_log_dump_event(port, NULL, 0);

	return (1);

done:

	/* Initialize hc_copy */
	hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));

#ifdef FMA_SUPPORT
	/* Access handle validation */
	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
	    != DDI_FM_OK)) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		goto reset_fail;
	}
#endif  /* FMA_SUPPORT */

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;

	/* Reset the ring objects */
	for (i = 0; i < MAX_RINGS; i++) {
		rp = &hba->sli.sli3.ring[i];
		rp->fc_mpon = 0;
		rp->fc_mpoff = 0;
	}

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		/* Re-seed the port's base node entry */
		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	return (0);

} /* emlxs_sli3_hba_reset */
2253 
2254 
2255 #define	BPL_CMD		0
2256 #define	BPL_RESP	1
2257 #define	BPL_DATA	2
2258 
2259 static ULP_BDE64 *
emlxs_pkt_to_bpl(fc_packet_t * pkt,ULP_BDE64 * bpl,uint32_t bpl_type)2260 emlxs_pkt_to_bpl(fc_packet_t *pkt, ULP_BDE64 *bpl, uint32_t bpl_type)
2261 {
2262 	ddi_dma_cookie_t *cp;
2263 	uint_t	i;
2264 	int32_t	size;
2265 	uint_t	cookie_cnt;
2266 	uint8_t bdeFlags;
2267 
2268 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2269 	switch (bpl_type) {
2270 	case BPL_CMD:
2271 		cp = pkt->pkt_cmd_cookie;
2272 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2273 		size = (int32_t)pkt->pkt_cmdlen;
2274 		bdeFlags = 0;
2275 		break;
2276 
2277 	case BPL_RESP:
2278 		cp = pkt->pkt_resp_cookie;
2279 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
2280 		size = (int32_t)pkt->pkt_rsplen;
2281 		bdeFlags = BUFF_USE_RCV;
2282 		break;
2283 
2284 
2285 	case BPL_DATA:
2286 		cp = pkt->pkt_data_cookie;
2287 		cookie_cnt = pkt->pkt_data_cookie_cnt;
2288 		size = (int32_t)pkt->pkt_datalen;
2289 		bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
2290 		    BUFF_USE_RCV : 0;
2291 		break;
2292 
2293 	default:
2294 		return (NULL);
2295 	}
2296 
2297 #else
2298 	switch (bpl_type) {
2299 	case BPL_CMD:
2300 		cp = &pkt->pkt_cmd_cookie;
2301 		cookie_cnt = 1;
2302 		size = (int32_t)pkt->pkt_cmdlen;
2303 		bdeFlags = 0;
2304 		break;
2305 
2306 	case BPL_RESP:
2307 		cp = &pkt->pkt_resp_cookie;
2308 		cookie_cnt = 1;
2309 		size = (int32_t)pkt->pkt_rsplen;
2310 		bdeFlags = BUFF_USE_RCV;
2311 		break;
2312 
2313 
2314 	case BPL_DATA:
2315 		cp = &pkt->pkt_data_cookie;
2316 		cookie_cnt = 1;
2317 		size = (int32_t)pkt->pkt_datalen;
2318 		bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
2319 		    BUFF_USE_RCV : 0;
2320 		break;
2321 
2322 	default:
2323 		return (NULL);
2324 	}
2325 #endif	/* >= EMLXS_MODREV3 */
2326 
2327 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2328 		bpl->addrHigh =
2329 		    BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2330 		bpl->addrLow =
2331 		    BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2332 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2333 		bpl->tus.f.bdeFlags = bdeFlags;
2334 		bpl->tus.w = BE_SWAP32(bpl->tus.w);
2335 
2336 		bpl++;
2337 		size -= cp->dmac_size;
2338 	}
2339 
2340 	return (bpl);
2341 
2342 } /* emlxs_pkt_to_bpl */
2343 
2344 
/*
 * emlxs_sli2_bde_setup
 *
 * Build an external buffer pointer list (BPL) for an I/O and point the
 * IOCB's BDL descriptor at it (SLI2-style addressing).  The payloads
 * included in the BPL depend on the ring the command is destined for:
 * FCP gets CMD[/RSP[/DATA]], IP gets CMD only, ELS and CT get CMD[/RSP].
 *
 * The BPL buffer comes from the preallocated per-iotag bpl_table when
 * one exists, otherwise from the MEM_BPL pool.  The buffer is recorded
 * in sbp->bmp (presumably reclaimed on I/O cleanup via sbp->bmp —
 * TODO(review): confirm against the completion path).
 *
 * Returns 0 on success; 1 if no BPL buffer is available or a payload
 * type is rejected by emlxs_pkt_to_bpl().
 */
static uint32_t
emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	MATCHMAP	*bmp;
	ULP_BDE64	*bpl;
	uint64_t	bp;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL	*cp;
	uint32_t	data_cookie_cnt;
	uint32_t	channelno;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);

	/* Prefer the preallocated BPL for this iotag; else use the pool */
	if (hba->sli.sli3.bpl_table) {
		bmp = hba->sli.sli3.bpl_table[sbp->iotag];
	} else {
		bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
	}

	if (!bmp) {
		return (1);
	}

	sbp->bmp = bmp;
	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	iocbq = &sbp->iocbq;

	/* FCP commands may be issued on any channel; treat them as FCP */
	channelno = (iocbq->flag & IOCB_FCP_CMD)? FC_FCP_RING:cp->channelno;
	switch (channelno) {
		case FC_FCP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}

		/* Check if response & data payloads are needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
		if (! bpl) {
			return (1);
		}

		/* Check if data payload is needed */
		if ((pkt->pkt_datalen == 0) ||
		    (data_cookie_cnt == 0)) {
			break;
		}

		/* DATA payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_DATA);
		if (! bpl) {
			return (1);
		}
		break;

	case FC_IP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}
		break;

	case FC_ELS_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}

		/* Check if response payload is needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
		if (! bpl) {
			return (1);
		}
		break;

	case FC_CT_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}

		/* Check if response payload is needed */
		/* Menlo-type CT frames always include a response */
		if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
		    (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
			break;
		}

		/* RSP payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
		if (! bpl) {
			return (1);
		}
		break;

	}

	/* Point the IOCB's BDL at the BPL; bdeSize is bytes of BPL used */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow  = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize  =
	    (uint32_t)(((uintptr_t)bpl - (uintptr_t)bmp->virt) & 0xFFFFFFFF);
	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */
2482 
2483 
/*
 * emlxs_sli3_bde_setup
 *
 * Build the buffer descriptors for an I/O directly inside the SLI3
 * extended IOCB (embedded BDEs), avoiding an external BPL.  If the
 * command or response payload spans more than one DMA cookie, or the
 * total cookie count exceeds SLI3_MAX_BDE, fall back to the SLI2
 * external-BPL method (emlxs_sli2_bde_setup).
 *
 * As with the SLI2 variant, which payloads are set up depends on the
 * destination ring: FCP gets CMD[/RSP[/DATA]], IP gets CMD only, ELS
 * and CT get CMD[/RSP].
 *
 * Returns 0 on success; nonzero only via the SLI2 fallback path.
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t	*pkt;
	ULP_BDE64	*bde;
	int		data_cookie_cnt;
	uint32_t	i;
	uint32_t	channelno;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL		*cp;

	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* Too many cookies for embedded BDEs: use the external BPL path */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	/* Pre-MODREV3: each payload has exactly one embedded cookie */
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	cp = sbp->channel;
	iocbq = &sbp->iocbq;
	iocb = (IOCB *)iocbq;
	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands may be issued on any channel; treat them as FCP */
	channelno = (iocbq->flag & IOCB_FCP_CMD)? FC_FCP_RING:cp->channelno;
	switch (channelno) {
	case FC_FCP_RING:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		/* Check if a response & data payload are needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
		iocb->unsli3.ext_iocb.ebde_count = 1;

		/* Check if a data payload is needed */
		if ((pkt->pkt_datalen == 0) ||
		    (data_cookie_cnt == 0)) {
			break;
		}

		/* DATA payload: one embedded BDE per cookie from ebde2 on */
		bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde2;
		for (i = 0; i < data_cookie_cnt; i++) {
			bde->addrHigh = PADDR_HI(cp_data->dmac_laddress);
			bde->addrLow = PADDR_LO(cp_data->dmac_laddress);
			bde->tus.f.bdeSize = cp_data->dmac_size;
			bde->tus.f.bdeFlags = 0;
			cp_data++;
			bde++;
		}
		iocb->unsli3.ext_iocb.ebde_count += data_cookie_cnt;

		break;

	case FC_IP_RING:
		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* Check if a response payload is needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
		iocb->unsli3.ext_iocb.ebde_count = 1;
		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		/* Check if a response payload is needed */
		/* Menlo-type CT frames always include a response */
		if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
		    (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
			break;
		}

		/* RSP payload */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
		iocb->unsli3.ext_iocb.ebde_count = 1;
		break;
	}

	/* No external BPL is used, so the legacy BDE fields are cleared */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */
2639 
2640 
2641 /* Only used for FCP Data xfers */
2642 #ifdef SFCT_SUPPORT
2643 /*ARGSUSED*/
/*
 * emlxs_sli2_fct_bde_setup
 *
 * Build the data BDE for an FCP target-mode (SFCT) transfer using a
 * single-entry external BPL (SLI2-style addressing).  The BPL buffer
 * comes from the preallocated per-iotag bpl_table when one exists,
 * otherwise from the MEM_BPL pool, and is recorded in sbp->bmp.
 *
 * If the command has no data buffer (sbp->fct_buf == NULL) the BDL is
 * cleared and 0 is returned.  Returns 1 if no BPL buffer is available.
 */
static uint32_t
emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	scsi_task_t *fct_task;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlags;
	IOCB *iocb;
	uint32_t size;
	MATCHMAP *mp;

	iocb = (IOCB *)&sbp->iocbq.iocb;
	sbp->bmp = NULL;

	if (!sbp->fct_buf) {
		/* No data phase: clear the BDL and transfer fields */
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (0);
	}

	/* Prefer the preallocated BPL for this iotag; else use the pool */
	if (hba->sli.sli3.bpl_table) {
		bmp = hba->sli.sli3.bpl_table[sbp->iotag];
	} else {
		bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
	}

	if (!bmp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_sli2_bde_setup: Unable to BPL buffer. iotag=%d",
		    sbp->iotag);

		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (1);
	}

	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	/* TF_WRITE_DATA flows inbound to us, so mark it as receive */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;

	/* Init the buffer list */
	bpl->addrHigh = BE_SWAP32(PADDR_HI(mp->phys));
	bpl->addrLow = BE_SWAP32(PADDR_LO(mp->phys));
	bpl->tus.f.bdeSize = size;
	bpl->tus.f.bdeFlags = bdeFlags;
	bpl->tus.w = BE_SWAP32(bpl->tus.w);

	/* Init the IOCB */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.fcpt64.bdl.bdeSize = sizeof (ULP_BDE64);
	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;

	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;
	sbp->bmp = bmp;

	return (0);

} /* emlxs_sli2_fct_bde_setup */
2728 #endif /* SFCT_SUPPORT */
2729 
2730 
2731 #ifdef SFCT_SUPPORT
2732 /*ARGSUSED*/
2733 static uint32_t
emlxs_sli3_fct_bde_setup(emlxs_port_t * port,emlxs_buf_t * sbp)2734 emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2735 {
2736 	scsi_task_t *fct_task;
2737 	IOCB *iocb;
2738 	MATCHMAP *mp;
2739 	uint32_t bdeFlags;
2740 	uint32_t size;
2741 
2742 	iocb = (IOCB *)&sbp->iocbq;
2743 
2744 	if (!sbp->fct_buf) {
2745 		iocb->un.fcpt64.bdl.addrHigh = 0;
2746 		iocb->un.fcpt64.bdl.addrLow = 0;
2747 		iocb->un.fcpt64.bdl.bdeSize = 0;
2748 		iocb->un.fcpt64.bdl.bdeFlags = 0;
2749 		iocb->un.fcpt64.fcpt_Offset = 0;
2750 		iocb->un.fcpt64.fcpt_Length = 0;
2751 		iocb->ULPBDECOUNT = 0;
2752 		iocb->ULPLE = 0;
2753 		iocb->unsli3.ext_iocb.ebde_count = 0;
2754 		return (0);
2755 	}
2756 
2757 	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;
2758 
2759 	size = sbp->fct_buf->db_data_size;
2760 	mp = (MATCHMAP *)sbp->fct_buf->db_port_private;
2761 
2762 	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
2763 
2764 	/* Init first BDE */
2765 	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(mp->phys);
2766 	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(mp->phys);
2767 	iocb->un.fcpt64.bdl.bdeSize = size;
2768 	iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;
2769 
2770 	iocb->unsli3.ext_iocb.ebde_count = 0;
2771 	iocb->un.fcpt64.fcpt_Length =
2772 	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
2773 	iocb->un.fcpt64.fcpt_Offset = 0;
2774 
2775 	iocb->ULPBDECOUNT = 0;
2776 	iocb->ULPLE = 0;
2777 
2778 	return (0);
2779 
2780 } /* emlxs_sli3_fct_bde_setup */
2781 #endif /* SFCT_SUPPORT */
2782 
2783 
/*
 * emlxs_sli3_issue_iocb_cmd
 *
 * Transmit path: issue the given iocbq (and any iocbs queued on the
 * channel's tx queue) onto the channel's command ring, then notify the
 * adapter by updating the host put index and ringing the Chip Attention
 * register.
 *
 * iocbq may be NULL, in which case only queued iocbs are drained.
 * If the adapter is not ready, the ring is full, or an adapter/node
 * throttle limit is hit, iocbs are parked on the tx queue and the
 * adapter is asked (CA_R0CE_REQ) to interrupt us when ring entries
 * free up.
 *
 * Locking: runs under the per-channel CMD_RING lock (acquired with
 * mutex_tryenter first to avoid contention; see the race-avoidance
 * logic below).  The TX_CHANNEL lock is taken only for tx queue
 * manipulation and never held across a hardware access here.
 */
static void
emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */
	PGP *pgp;
	emlxs_buf_t *sbp;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	RING *rp;
	uint32_t nextIdx;
	uint32_t status;
	void *ioa2;
	off_t offset;
	uint32_t count = 0;
	uint32_t flag;
	uint32_t channelno;
	int32_t throttle;
#ifdef NODE_THROTTLE_SUPPORT
	int32_t node_throttle;
	NODELIST *marked_node = NULL;
#endif /* NODE_THROTTLE_SUPPORT */

	channelno = cp->channelno;
	rp = (RING *)cp->iopath;

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			/*
			 * If plenty of I/O is already outstanding, the
			 * current lock holder will drain the tx queue;
			 * otherwise block for the lock to avoid stranding
			 * this iocb.
			 */
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				/*
				 * EMLXS_MSGF(EMLXS_CONTEXT,
				 * &emlxs_ring_watchdog_msg,
				 * "%s host=%d port=%d cnt=%d,%d  RACE
				 * CONDITION3 DETECTED.",
				 * emlxs_ring_xlate(channelno),
				 * rp->fc_cmdidx, rp->fc_port_cmdidx,
				 * hba->channel_tx_count,
				 * hba->io_count);
				 */
				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* CMD_RING_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Read adapter's get index (from the shared SLIM2 port get/put */
	/* area; must be DMA-synced before reading) */
	pgp = (PGP *)
	    &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

	/* Calculate the next put index */
	nextIdx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

	/* Check if ring is full */
	if (nextIdx == rp->fc_port_cmdidx) {
		/* Try one more time */
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

		if (nextIdx == rp->fc_port_cmdidx) {
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		/* Try to get the next iocb on the tx queue */
		iocbq = emlxs_tx_get(cp, 1);
	}

sendit:
	count = 0;

	/* Process each iocbq */
	while (iocbq) {
		sbp = iocbq->sbp;

#ifdef NODE_THROTTLE_SUPPORT
		if (sbp && sbp->node && sbp->node->io_throttle) {
			node_throttle = sbp->node->io_throttle -
			    sbp->node->io_active;
			if (node_throttle <= 0) {
				/* Node is busy */
				/* Queue this iocb and get next iocb from */
				/* channel */

				/* Remember the first busy node so we can */
				/* detect when the queue wraps back to it */
				if (!marked_node) {
					marked_node = sbp->node;
				}

				mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
				emlxs_tx_put(iocbq, 0);

				if (cp->nodeq.q_first == marked_node) {
					mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
					goto busy;
				}

				iocbq = emlxs_tx_get(cp, 0);
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
				continue;
			}
		}
		marked_node = 0;
#endif /* NODE_THROTTLE_SUPPORT */

		if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
			/*
			 * Update adapter if needed, since we are about to
			 * delay here
			 */
			if (count) {
				count = 0;

				/* Update the adapter's cmd put index */
				if (hba->bus_type == SBUS_FC) {
					slim2p->mbx.us.s2.host[channelno].
					    cmdPutInx =
					    BE_SWAP32(rp->fc_cmdidx);

					/* DMA sync the index for the adapter */
					offset = (off_t)
					    ((uint64_t)
					    ((unsigned long)&(slim2p->mbx.us.
					    s2.host[channelno].cmdPutInx)) -
					    (uint64_t)((unsigned long)slim2p));
					EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
					    dma_handle, offset, 4,
					    DDI_DMA_SYNC_FORDEV);
				} else {
					ioa2 = (void *)
					    ((char *)hba->sli.sli3.slim_addr +
					    hba->sli.sli3.hgp_ring_offset +
					    ((channelno * 2) *
					    sizeof (uint32_t)));
					WRITE_SLIM_ADDR(hba,
					    (volatile uint32_t *)ioa2,
					    rp->fc_cmdidx);
				}

				/* Ring the Chip Attention doorbell */
				status = (CA_R0ATT << (channelno * 4));
				WRITE_CSR_REG(hba, FC_CA_REG(hba),
				    (volatile uint32_t)status);

			}
			/* Perform delay */
			if ((channelno == FC_ELS_RING) &&
			    !(iocbq->flag & IOCB_FCP_CMD)) {
				drv_usecwait(100000);
			} else {
				drv_usecwait(20000);
			}
		}

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		flag =  iocbq->flag;

		/* Send the iocb */
		emlxs_sli3_issue_iocb(hba, rp, iocbq);
		/*
		 * After this, the sbp / iocb should not be
		 * accessed in the xmit path.
		 */

		count++;
		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Calculate the next put index */
		nextIdx =
		    (rp->fc_cmdidx + 1 >=
		    rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

		/* Check if ring is full */
		if (nextIdx == rp->fc_port_cmdidx) {
			/* Try one more time */
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORKERNEL);
			rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

			if (nextIdx == rp->fc_port_cmdidx) {
				goto busy;
			}
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].
			    cmdPutInx = BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}

		/* Ring the Chip Attention doorbell */
		status = (CA_R0ATT << (channelno * 4));
		WRITE_CSR_REG(hba, FC_CA_REG(hba),
		    (volatile uint32_t)status);

		/* Check tx queue one more time before releasing */
		if ((iocbq = emlxs_tx_get(cp, 1))) {
			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
			 * "%s host=%d port=%d   RACE CONDITION1
			 * DETECTED.", emlxs_ring_xlate(channelno),
			 * rp->fc_cmdidx, rp->fc_port_cmdidx);
			 */
			goto sendit;
		}
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

busy:

	/*
	 * Set ring to SET R0CE_REQ in Chip Att register.
	 * Chip will tell us when an entry is freed.
	 */
	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].cmdPutInx =
			    BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}
	}

	status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
	WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);

	/* Record why we stopped: adapter throttle vs. ring full */
	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

} /* emlxs_sli3_issue_iocb_cmd() */
3145 
3146 
3147 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
3148 /* MBX_WAIT   - returns MBX_TIMEOUT or mailbox_status */
3149 /* MBX_SLEEP  - returns MBX_TIMEOUT or mailbox_status */
3150 /* MBX_POLL   - returns MBX_TIMEOUT or mailbox_status */
3151 
/*
 * Issue a mailbox command to the adapter.
 *
 * 'tmo' is the timeout in seconds and is raised to a per-command
 * minimum below.  'flag' selects the completion model (see the
 * comment block above); MBX_WAIT is downgraded to MBX_SLEEP or
 * MBX_POLL depending on whether the mailbox interrupt is enabled.
 * Only one mailbox command may be outstanding at a time; NOWAIT
 * commands that collide are queued via emlxs_mb_put().
 */
static uint32_t
emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t		*port;
	SLIM2			*slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	MAILBOX			*mbox;	/* mailbox area in SLIM (device) */
	MAILBOX			*mb;	/* caller's mailbox image */
	uint32_t		*word0;	/* aliases swpmb as a raw word */
	volatile uint32_t	ldata;
	off_t			offset;	/* mailbox offset within SLIM2 */
	MATCHMAP		*mbox_bp;
	uint32_t		tmo_local;
	MAILBOX			swpmb;	/* host-order copy of word 0 */

	/* Default to the physical port if the caller did not set one */
	if (!mbq->port) {
		mbq->port = &PPORT;
	}

	port = (emlxs_port_t *)mbq->port;

	mb = (MAILBOX *)mbq;
	word0 = (uint32_t *)&swpmb;

	mb->mbxStatus = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
	/* Mailbox commands that erase/write flash */
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_WRITE_NV:
	case MBX_FLASH_WR_ULA:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_SM:
		/* Flash operations are slow; allow at least 300 seconds */
		if (tmo < 300) {
			tmo = 300;
		}
		break;

	default:
		if (tmo < 30) {
			tmo = 30;
		}
		break;
	}

	/* Convert tmo seconds to 10 millisecond tics */
	tmo_local = tmo * 100;

	/* Adjust wait flag */
	if (flag != MBX_NOWAIT) {
		/* If interrupt is enabled, use sleep, otherwise poll */
		if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
			flag = MBX_SLEEP;
		} else {
			flag = MBX_POLL;
		}
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for hardware error */
	if (hba->flag & FC_HARDWARE_ERROR) {
		mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
		    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;

		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);

		return (MBX_HARDWARE_ERROR);
	}

	/* Another mailbox command is currently active */
	if (hba->mbox_queue_flag) {
		/* If we are not polling, then queue it for later */
		if (flag == MBX_NOWAIT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Busy.      %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

			emlxs_mb_put(hba, mbq);

			HBASTATS.MboxBusy++;

			mutex_exit(&EMLXS_PORT_LOCK);

			return (MBX_BUSY);
		}

		/* Busy-wait in 10ms steps for the active command to clear */
		while (hba->mbox_queue_flag) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (tmo_local-- == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				/* Non-lethalStatus mailbox timeout */
				/* Does not indicate a hardware error */
				mb->mbxStatus = MBX_TIMEOUT;
				return (MBX_TIMEOUT);
			}

			BUSYWAIT_MS(10);
			mutex_enter(&EMLXS_PORT_LOCK);

			/* Check for hardware error */
			if (hba->flag & FC_HARDWARE_ERROR) {
				mb->mbxStatus =
				    (hba->flag & FC_OVERTEMP_EVENT) ?
				    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;

				mutex_exit(&EMLXS_PORT_LOCK);

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Hardware error reported. %s failed. "
				    "status=%x mb=%p",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    mb->mbxStatus, mb);

				return (MBX_HARDWARE_ERROR);
			}
		}
	}

	/* Initialize mailbox area */
	emlxs_mb_init(hba, mbq, flag, tmo);

	/* Log the send; downloads/dumps are suppressed as too chatty */
	switch (flag) {
	case MBX_NOWAIT:

		if (mb->mbxCommand != MBX_HEARTBEAT) {
			if (mb->mbxCommand != MBX_DOWN_LOAD &&
			    mb->mbxCommand != MBX_DUMP_MEMORY) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Sending.   %s: mb=%p NoWait.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
			}
		}

		break;

	case MBX_SLEEP:
		if (mb->mbxCommand != MBX_DOWN_LOAD &&
		    mb->mbxCommand != MBX_DUMP_MEMORY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending.   %s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
		}

		break;

	case MBX_POLL:
		if (mb->mbxCommand != MBX_DOWN_LOAD &&
		    mb->mbxCommand != MBX_DUMP_MEMORY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending.   %s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
		}
		break;
	}

	/* Hand ownership of the mailbox to the adapter */
	mb->mbxOwner = OWN_CHIP;

	/* Clear the attention bit */
	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);

	if (hba->flag & FC_SLIM2_MODE) {
		/* First copy command data */
		mbox = FC_SLIM2_MAILBOX(hba);
		offset =
		    (off_t)((uint64_t)((unsigned long)mbox)
		    - (uint64_t)((unsigned long)slim2p));

#ifdef MBOX_EXT_SUPPORT
		/* Copy the extension buffer (if any) ahead of the command */
		if (mbq->extbuf) {
			uint32_t *mbox_ext =
			    (uint32_t *)((uint8_t *)mbox +
			    MBOX_EXTENSION_OFFSET);
			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;

			BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
			    (uint8_t *)mbox_ext, mbq->extsize);

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset_ext, mbq->extsize,
			    DDI_DMA_SYNC_FORDEV);
		}
#endif /* MBOX_EXT_SUPPORT */

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
		    MAILBOX_CMD_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
	} else {	/* SLIM 1 */

		mbox = FC_SLIM1_MAILBOX(hba);

#ifdef MBOX_EXT_SUPPORT
		if (mbq->extbuf) {
			uint32_t *mbox_ext =
			    (uint32_t *)((uint8_t *)mbox +
			    MBOX_EXTENSION_OFFSET);
			WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
			    mbox_ext, (mbq->extsize / 4));
		}
#endif /* MBOX_EXT_SUPPORT */

		/* First copy command data */
		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
		    (MAILBOX_CMD_WSIZE - 1));

		/* copy over last word, with mbxOwner set */
		ldata = *((volatile uint32_t *)mb);
		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
	}

	/* Interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
	    != DDI_FM_OK)) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		return (MBX_HARDWARE_ERROR);
	}
#endif  /* FMA_SUPPORT */

	switch (flag) {
	case MBX_NOWAIT:
		return (MBX_SUCCESS);

	case MBX_SLEEP:

		/* Wait for completion */
		/* The driver clock is timing the mailbox. */
		/* emlxs_mb_fini() will be called externally. */

		mutex_enter(&EMLXS_MBOX_LOCK);
		while (!(mbq->flag & MBQ_COMPLETED)) {
			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		if (mb->mbxStatus == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
			    "Timeout.   %s: mb=%p tmo=%d. Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD &&
			    mb->mbxCommand != MBX_DUMP_MEMORY) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed. %s: mb=%p status=%x Sleep.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    mb->mbxStatus);
			}
		}

		break;

	case MBX_POLL:

		/* Convert tmo seconds to 500 usec tics */
		tmo_local = tmo * 2000;

		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mbox = FC_SLIM2_MAILBOX(hba);
			offset = (off_t)((uint64_t)((unsigned long)mbox) -
			    (uint64_t)((unsigned long)slim2p));

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			*word0 = *((volatile uint32_t *)mbox);
			*word0 = BE_SWAP32(*word0);
		} else {
			mbox = FC_SLIM1_MAILBOX(hba);
			*word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
		}

		/* Wait for command to complete */
		/* (swpmb is refreshed through word0 on every pass) */
		while ((swpmb.mbxOwner == OWN_CHIP) &&
		    !(mbq->flag & MBQ_COMPLETED)) {
			/* Only self-time if the driver timer isn't running */
			if (!hba->timer_id && (tmo_local-- == 0)) {
				/* self time */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_timeout_msg,
				    "%s: mb=%p tmo=%d Polled.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    mb, tmo);

				hba->flag |= FC_MBOX_TIMEOUT;
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

				break;
			}

			BUSYWAIT_US(500);

			/* Get first word of mailbox */
			if (hba->flag & FC_SLIM2_MODE) {
				EMLXS_MPDATA_SYNC(
				    hba->sli.sli3.slim2.dma_handle, offset,
				    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
				*word0 = *((volatile uint32_t *)mbox);
				*word0 = BE_SWAP32(*word0);
			} else {
				*word0 =
				    READ_SLIM_ADDR(hba,
				    ((volatile uint32_t *)mbox));
			}

		}	/* while */

		if (mb->mbxStatus == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
			    "Timeout.   %s: mb=%p tmo=%d. Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);

			break;
		}

		/* Check for config port command */
		if ((swpmb.mbxCommand == MBX_CONFIG_PORT) &&
		    (swpmb.mbxStatus == MBX_SUCCESS)) {
			/* Setup host mbox for cmpl */
			/* A successful CONFIG_PORT switches us to SLIM2 */
			mbox = FC_SLIM2_MAILBOX(hba);
			offset = (off_t)((uint64_t)((unsigned long)mbox)
			    - (uint64_t)((unsigned long)slim2p));

			hba->flag |= FC_SLIM2_MODE;
		}

		/* copy results back to user */
		if (hba->flag & FC_SLIM2_MODE) {
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);

			BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
			    MAILBOX_CMD_BSIZE);
		} else {
			READ_SLIM_COPY(hba, (uint32_t *)mb,
			    (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
		}

#ifdef MBOX_EXT_SUPPORT
		if (mbq->extbuf) {
			uint32_t *mbox_ext =
			    (uint32_t *)((uint8_t *)mbox +
			    MBOX_EXTENSION_OFFSET);
			/* NOTE(review): offset is only assigned on the */
			/* SLIM2 paths above; it is unused in SLIM1 mode */
			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;

			if (hba->flag & FC_SLIM2_MODE) {
				EMLXS_MPDATA_SYNC(
				    hba->sli.sli3.slim2.dma_handle, offset_ext,
				    mbq->extsize, DDI_DMA_SYNC_FORKERNEL);

				BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
				    (uint8_t *)mbq->extbuf, mbq->extsize);
			} else {
				READ_SLIM_COPY(hba,
				    (uint32_t *)mbq->extbuf, mbox_ext,
				    (mbq->extsize / 4));
			}
		}
#endif /* MBOX_EXT_SUPPORT */

		/* Sync the memory buffer */
		if (mbq->bp) {
			mbox_bp = (MATCHMAP *)mbq->bp;
			EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
			    mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
		}

		if (mb->mbxCommand != MBX_DOWN_LOAD &&
		    mb->mbxCommand != MBX_DUMP_MEMORY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Completed. %s: mb=%p status=%x Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    mb->mbxStatus);
		}

		/* Process the result */
		if (!(mbq->flag & MBQ_PASSTHRU)) {
			if (mbq->mbox_cmpl) {
				(void) (mbq->mbox_cmpl)(hba, mbq);
			}
		}

		/* Clear the attention bit */
		WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);

		/* Clean up the mailbox area */
		emlxs_mb_fini(hba, NULL, mb->mbxStatus);

		break;

	}	/* switch (flag) */

	return (mb->mbxStatus);

} /* emlxs_sli3_issue_mbox_cmd() */
3573 
3574 
3575 #ifdef SFCT_SUPPORT
3576 /*ARGSUSED*/
/*
 * Build an IOCB for a target-mode (SFCT/COMSTAR) command.
 *
 * Three cases are handled, keyed off the ULPCOMMAND already stored in
 * the iocb by the caller:
 *   - CMD_ABORT_XRI_CX: abort (link up) or close (link down) an exchange
 *   - CMD_FCP_TRSP64_CX: send an FCP response frame
 *   - default: data phase; FCP_TRECEIVE64 (write) or FCP_TSEND64 (read)
 *
 * Returns IOERR_SUCCESS, or an IOERR_* code on failure (in which case
 * the iotag is released before returning).
 */
static uint32_t
emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
    int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	fc_packet_t *pkt;
	uint32_t did;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t timeout;
	uint32_t iotag;
	emlxs_node_t *ndlp;
	CHANNEL *cp;
	ddi_dma_cookie_t *cp_cmd;

	pkt = PRIV2PKT(cmd_sbp);

	cp = (CHANNEL *)cmd_sbp->channel;

	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, cmd_sbp);

	if (!iotag) {
		/* No more command slots available, retry later */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x",
		    cmd_sbp->did);

		return (IOERR_NO_RESOURCES);
	}


	/* Point of no return */

	if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {

		ndlp = cmd_sbp->node;
		cp->ulpSendCmd++;

		/* Initalize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		/*
		 * Don't give the abort priority, we want the IOCB
		 * we are aborting to be processed first.
		 */
		iocbq->flag |= IOCB_SPECIAL;

		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPIOTAG = (uint16_t)iotag;
		iocb->ULPLE = 1;
		iocb->ULPCLASS = cmd_sbp->class;
		iocb->ULPOWNER = OWN_CHIP;

		/* Abort only works while the link is up; otherwise close */
		if (hba->state >= FC_LINK_UP) {
			/* Create the abort IOCB */
			iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
			iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;

		} else {
			/* Create the close IOCB */
			iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;

		}

		iocb->ULPRSVDBYTE =
		    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

		return (IOERR_SUCCESS);

	} else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {

		ndlp = cmd_sbp->node;
		cp->ulpSendCmd++;

		/* Initalize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		/* pkt_cmd_cookie is a pointer in MODREV3+, inline before */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		cp_cmd = pkt->pkt_cmd_cookie;
#else
		cp_cmd  = &pkt->pkt_cmd_cookie;
#endif	/* >= EMLXS_MODREV3 */

		/* Point the BDL at the response payload's DMA cookie */
		iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.fcpt64.bdl.bdeFlags = 0;

		if (hba->sli_mode < 3) {
			iocb->ULPBDECOUNT = 1;
			iocb->ULPLE = 1;
		} else {	/* SLI3 */

			iocb->ULPBDECOUNT = 0;
			iocb->ULPLE = 0;
			iocb->unsli3.ext_iocb.ebde_count = 0;
		}

		/* Initalize iocb */
		iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPIOTAG = (uint16_t)iotag;
		iocb->ULPRSVDBYTE =
		    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
		iocb->ULPOWNER = OWN_CHIP;
		iocb->ULPCLASS = cmd_sbp->class;
		iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

		/* Flush the response payload to the device before send */
		if (pkt->pkt_cmdlen) {
			EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
			    DDI_DMA_SYNC_FORDEV);
		}

		return (IOERR_SUCCESS);
	}

	/* Default case: data phase for the FCT command */
	dbuf = cmd_sbp->fct_buf;
	fct_cmd = cmd_sbp->fct_cmd;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	did = fct_cmd->cmd_rportid;

	iocbq->channel = (void *)cmd_sbp->channel;

	if (emlxs_fct_bde_setup(port, cmd_sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (IOERR_INTERNAL_ERROR);
	}

	/* Timeout is 2*RATOV (min 60s), or effectively disabled */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}

	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);

	/* Initalize iocbq */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;

	/* Initalize iocb */
	iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
	iocb->ULPIOTAG = (uint16_t)iotag;
	iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
	iocb->ULPOWNER = OWN_CHIP;
	iocb->ULPCLASS = cmd_sbp->class;

	iocb->ULPPU = 1;	/* Wd4 is relative offset */
	iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;

	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
	} else {	/* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;

		if ((hba->sli_mode == EMLXS_HBA_SLI3_MODE) &&
		    (dbuf->db_data_size >=
		    fct_task->task_expected_xfer_length)) {
			iocb->ULPCT = 0x1;
			/* enable auto-rsp AP feature */
		}
	}

	return (IOERR_SUCCESS);

} /* emlxs_sli3_prep_fct_iocb() */
3771 #endif /* SFCT_SUPPORT */
3772 
3773 /* ARGSUSED */
3774 static uint32_t
emlxs_sli3_prep_fcp_iocb(emlxs_port_t * port,emlxs_buf_t * sbp,int channel)3775 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3776 {
3777 	emlxs_hba_t *hba = HBA;
3778 	fc_packet_t *pkt;
3779 	CHANNEL *cp;
3780 	IOCBQ *iocbq;
3781 	IOCB *iocb;
3782 	NODELIST *ndlp;
3783 	uint16_t iotag;
3784 	uint32_t did;
3785 
3786 	pkt = PRIV2PKT(sbp);
3787 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3788 	cp = &hba->chan[FC_FCP_RING];
3789 
3790 	iocbq = &sbp->iocbq;
3791 	iocb = &iocbq->iocb;
3792 
3793 	/* Find target node object */
3794 	ndlp = (NODELIST *)iocbq->node;
3795 
3796 	/* Get the iotag by registering the packet */
3797 	iotag = emlxs_register_pkt(cp, sbp);
3798 
3799 	if (!iotag) {
3800 		/*
3801 		 * No more command slots available, retry later
3802 		 */
3803 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3804 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3805 
3806 		return (FC_TRAN_BUSY);
3807 	}
3808 
3809 	/* Initalize iocbq */
3810 	iocbq->port = (void *) port;
3811 	iocbq->channel = (void *) cp;
3812 
3813 	/* Indicate this is a FCP cmd */
3814 	iocbq->flag |= IOCB_FCP_CMD;
3815 
3816 	if (emlxs_bde_setup(port, sbp)) {
3817 		/* Unregister the packet */
3818 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3819 
3820 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3821 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3822 
3823 		return (FC_TRAN_BUSY);
3824 	}
3825 	/* Point of no return */
3826 
3827 	/* Initalize iocb */
3828 	iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3829 	iocb->ULPIOTAG = iotag;
3830 	iocb->ULPRSVDBYTE =
3831 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3832 	iocb->ULPOWNER = OWN_CHIP;
3833 
3834 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3835 	case FC_TRAN_CLASS1:
3836 		iocb->ULPCLASS = CLASS1;
3837 		break;
3838 	case FC_TRAN_CLASS2:
3839 		iocb->ULPCLASS = CLASS2;
3840 		/* iocb->ULPCLASS = CLASS3; */
3841 		break;
3842 	case FC_TRAN_CLASS3:
3843 	default:
3844 		iocb->ULPCLASS = CLASS3;
3845 		break;
3846 	}
3847 
3848 	/* if device is FCP-2 device, set the following bit */
3849 	/* that says to run the FC-TAPE protocol. */
3850 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3851 		iocb->ULPFCP2RCVY = 1;
3852 	}
3853 
3854 	if (pkt->pkt_datalen == 0) {
3855 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3856 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3857 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
3858 		iocb->ULPPU = PARM_XFER_CHECK;
3859 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
3860 	} else {
3861 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3862 	}
3863 
3864 	return (FC_SUCCESS);
3865 
3866 } /* emlxs_sli3_prep_fcp_iocb() */
3867 
3868 
3869 static uint32_t
emlxs_sli3_prep_ip_iocb(emlxs_port_t * port,emlxs_buf_t * sbp)3870 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3871 {
3872 	emlxs_hba_t *hba = HBA;
3873 	fc_packet_t *pkt;
3874 	IOCBQ *iocbq;
3875 	IOCB *iocb;
3876 	CHANNEL *cp;
3877 	NODELIST *ndlp;
3878 	uint16_t iotag;
3879 	uint32_t did;
3880 
3881 	pkt = PRIV2PKT(sbp);
3882 	cp = &hba->chan[FC_IP_RING];
3883 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3884 
3885 	iocbq = &sbp->iocbq;
3886 	iocb = &iocbq->iocb;
3887 	ndlp = (NODELIST *)iocbq->node;
3888 
3889 	/* Get the iotag by registering the packet */
3890 	iotag = emlxs_register_pkt(cp, sbp);
3891 
3892 	if (!iotag) {
3893 		/*
3894 		 * No more command slots available, retry later
3895 		 */
3896 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3897 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3898 
3899 		return (FC_TRAN_BUSY);
3900 	}
3901 
3902 	/* Initalize iocbq */
3903 	iocbq->port = (void *) port;
3904 	iocbq->channel = (void *) cp;
3905 
3906 	if (emlxs_bde_setup(port, sbp)) {
3907 		/* Unregister the packet */
3908 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3909 
3910 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3911 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3912 
3913 		return (FC_TRAN_BUSY);
3914 	}
3915 	/* Point of no return */
3916 
3917 	/* Initalize iocb */
3918 	iocb->un.xseq64.w5.hcsw.Fctl = 0;
3919 
3920 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
3921 		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
3922 	}
3923 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3924 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
3925 	}
3926 
3927 	/* network headers */
3928 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
3929 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
3930 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
3931 
3932 	iocb->ULPIOTAG = iotag;
3933 	iocb->ULPRSVDBYTE =
3934 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3935 	iocb->ULPOWNER = OWN_CHIP;
3936 
3937 	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
3938 		HBASTATS.IpBcastIssued++;
3939 
3940 		iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
3941 		iocb->ULPCONTEXT = 0;
3942 
3943 		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
3944 			if (hba->topology != TOPOLOGY_LOOP) {
3945 				iocb->ULPCT = 0x1;
3946 			}
3947 			iocb->ULPCONTEXT = port->vpi;
3948 		}
3949 	} else {
3950 		HBASTATS.IpSeqIssued++;
3951 
3952 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3953 		iocb->ULPCONTEXT = ndlp->nlp_Xri;
3954 	}
3955 
3956 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3957 	case FC_TRAN_CLASS1:
3958 		iocb->ULPCLASS = CLASS1;
3959 		break;
3960 	case FC_TRAN_CLASS2:
3961 		iocb->ULPCLASS = CLASS2;
3962 		break;
3963 	case FC_TRAN_CLASS3:
3964 	default:
3965 		iocb->ULPCLASS = CLASS3;
3966 		break;
3967 	}
3968 
3969 	return (FC_SUCCESS);
3970 
3971 } /* emlxs_sli3_prep_ip_iocb() */
3972 
3973 
3974 static uint32_t
emlxs_sli3_prep_els_iocb(emlxs_port_t * port,emlxs_buf_t * sbp)3975 emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3976 {
3977 	emlxs_hba_t *hba = HBA;
3978 	fc_packet_t *pkt;
3979 	IOCBQ *iocbq;
3980 	IOCB *iocb;
3981 	CHANNEL *cp;
3982 	uint16_t iotag;
3983 	uint32_t did;
3984 	uint32_t cmd;
3985 
3986 	pkt = PRIV2PKT(sbp);
3987 	cp = &hba->chan[FC_ELS_RING];
3988 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3989 
3990 	iocbq = &sbp->iocbq;
3991 	iocb = &iocbq->iocb;
3992 
3993 
3994 	/* Get the iotag by registering the packet */
3995 	iotag = emlxs_register_pkt(cp, sbp);
3996 
3997 	if (!iotag) {
3998 		/*
3999 		 * No more command slots available, retry later
4000 		 */
4001 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4002 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
4003 
4004 		return (FC_TRAN_BUSY);
4005 	}
4006 	/* Initalize iocbq */
4007 	iocbq->port = (void *) port;
4008 	iocbq->channel = (void *) cp;
4009 
4010 	if (emlxs_bde_setup(port, sbp)) {
4011 		/* Unregister the packet */
4012 		(void) emlxs_unregister_pkt(cp, iotag, 0);
4013 
4014 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4015 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
4016 
4017 		return (FC_TRAN_BUSY);
4018 	}
4019 	/* Point of no return */
4020 
4021 	/* Initalize iocb */
4022 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
4023 		/* ELS Response */
4024 		iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
4025 		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
4026 	} else {
4027 		/* ELS Request */
4028 		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
4029 		iocb->ULPCONTEXT =
4030 		    (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
4031 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
4032 
4033 		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
4034 			if (hba->topology != TOPOLOGY_LOOP) {
4035 				cmd = *((uint32_t *)pkt->pkt_cmd);
4036 				cmd &= ELS_CMD_MASK;
4037 
4038 				if ((cmd == ELS_CMD_FLOGI) ||
4039 				    (cmd == ELS_CMD_FDISC)) {
4040 					iocb->ULPCT = 0x2;
4041 				} else {
4042 					iocb->ULPCT = 0x1;
4043 				}
4044 			}
4045 			iocb->ULPCONTEXT = port->vpi;
4046 		}
4047 	}
4048 	iocb->ULPIOTAG = iotag;
4049 	iocb->ULPRSVDBYTE =
4050 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4051 	iocb->ULPOWNER = OWN_CHIP;
4052 
4053 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4054 	case FC_TRAN_CLASS1:
4055 		iocb->ULPCLASS = CLASS1;
4056 		break;
4057 	case FC_TRAN_CLASS2:
4058 		iocb->ULPCLASS = CLASS2;
4059 		break;
4060 	case FC_TRAN_CLASS3:
4061 	default:
4062 		iocb->ULPCLASS = CLASS3;
4063 		break;
4064 	}
4065 	sbp->class = iocb->ULPCLASS;
4066 
4067 	return (FC_SUCCESS);
4068 
4069 } /* emlxs_sli3_prep_els_iocb() */
4070 
4071 
4072 static uint32_t
emlxs_sli3_prep_ct_iocb(emlxs_port_t * port,emlxs_buf_t * sbp)4073 emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4074 {
4075 	emlxs_hba_t *hba = HBA;
4076 	fc_packet_t *pkt;
4077 	IOCBQ *iocbq;
4078 	IOCB *iocb;
4079 	CHANNEL *cp;
4080 	NODELIST *ndlp;
4081 	uint16_t iotag;
4082 	uint32_t did;
4083 
4084 	pkt = PRIV2PKT(sbp);
4085 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4086 	cp = &hba->chan[FC_CT_RING];
4087 
4088 	iocbq = &sbp->iocbq;
4089 	iocb = &iocbq->iocb;
4090 	ndlp = (NODELIST *)iocbq->node;
4091 
4092 	/* Get the iotag by registering the packet */
4093 	iotag = emlxs_register_pkt(cp, sbp);
4094 
4095 	if (!iotag) {
4096 		/*
4097 		 * No more command slots available, retry later
4098 		 */
4099 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4100 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
4101 
4102 		return (FC_TRAN_BUSY);
4103 	}
4104 
4105 	if (emlxs_bde_setup(port, sbp)) {
4106 		/* Unregister the packet */
4107 		(void) emlxs_unregister_pkt(cp, iotag, 0);
4108 
4109 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4110 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
4111 
4112 		return (FC_TRAN_BUSY);
4113 	}
4114 
4115 	/* Point of no return */
4116 
4117 	/* Initalize iocbq */
4118 	iocbq->port = (void *) port;
4119 	iocbq->channel = (void *) cp;
4120 
4121 	/* Fill in rest of iocb */
4122 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
4123 
4124 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
4125 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
4126 	}
4127 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
4128 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
4129 	}
4130 
4131 	/* Initalize iocb */
4132 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
4133 		/* CT Response */
4134 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
4135 		iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
4136 		iocb->ULPCONTEXT  = pkt->pkt_cmd_fhdr.rx_id;
4137 	} else {
4138 		/* CT Request */
4139 		iocb->ULPCOMMAND  = CMD_GEN_REQUEST64_CR;
4140 		iocb->un.genreq64.w5.hcsw.Dfctl = 0;
4141 		iocb->ULPCONTEXT  = ndlp->nlp_Rpi;
4142 	}
4143 
4144 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
4145 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
4146 
4147 	iocb->ULPIOTAG    = iotag;
4148 	iocb->ULPRSVDBYTE =
4149 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4150 	iocb->ULPOWNER    = OWN_CHIP;
4151 
4152 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4153 	case FC_TRAN_CLASS1:
4154 		iocb->ULPCLASS = CLASS1;
4155 		break;
4156 	case FC_TRAN_CLASS2:
4157 		iocb->ULPCLASS = CLASS2;
4158 		break;
4159 	case FC_TRAN_CLASS3:
4160 	default:
4161 		iocb->ULPCLASS = CLASS3;
4162 		break;
4163 	}
4164 
4165 	return (FC_SUCCESS);
4166 
4167 } /* emlxs_sli3_prep_ct_iocb() */
4168 
4169 
4170 #ifdef SFCT_SUPPORT
4171 static uint32_t
emlxs_fct_bde_setup(emlxs_port_t * port,emlxs_buf_t * sbp)4172 emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
4173 {
4174 	emlxs_hba_t *hba = HBA;
4175 	uint32_t rval;
4176 
4177 	if (sbp->fct_buf->db_sglist_length != 1) {
4178 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
4179 		    "fct_bde_setup: Only 1 sglist entry supported: %d",
4180 		    sbp->fct_buf->db_sglist_length);
4181 		return (1);
4182 	}
4183 
4184 	if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) {
4185 		rval = emlxs_sli2_fct_bde_setup(port, sbp);
4186 	} else {
4187 		rval = emlxs_sli3_fct_bde_setup(port, sbp);
4188 	}
4189 
4190 	return (rval);
4191 
4192 } /* emlxs_fct_bde_setup() */
4193 #endif /* SFCT_SUPPORT */
4194 
4195 
4196 static uint32_t
emlxs_bde_setup(emlxs_port_t * port,emlxs_buf_t * sbp)4197 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
4198 {
4199 	uint32_t	rval;
4200 	emlxs_hba_t	*hba = HBA;
4201 
4202 	if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) {
4203 		rval = emlxs_sli2_bde_setup(port, sbp);
4204 	} else {
4205 		rval = emlxs_sli3_bde_setup(port, sbp);
4206 	}
4207 
4208 	return (rval);
4209 
4210 } /* emlxs_bde_setup() */
4211 
4212 
4213 static void
emlxs_sli3_poll_intr(emlxs_hba_t * hba)4214 emlxs_sli3_poll_intr(emlxs_hba_t *hba)
4215 {
4216 	uint32_t ha_copy;
4217 
4218 	/* Check attention bits once and process if required */
4219 
4220 	ha_copy = emlxs_check_attention(hba);
4221 
4222 	if (ha_copy == 0) {
4223 		return;
4224 	}
4225 
4226 	mutex_enter(&EMLXS_PORT_LOCK);
4227 	ha_copy = emlxs_get_attention(hba, -1);
4228 	mutex_exit(&EMLXS_PORT_LOCK);
4229 
4230 	emlxs_proc_attention(hba, ha_copy);
4231 
4232 	return;
4233 
4234 } /* emlxs_sli3_poll_intr() */
4235 
4236 
4237 #ifdef MSI_SUPPORT
/*
 * MSI/MSI-X (and legacy fixed) interrupt service routine.
 *
 *   arg1 - the emlxs_hba_t for the interrupting adapter
 *   arg2 - the MSI vector number (cast from the DDI handler argument);
 *          ignored for fixed interrupts
 *
 * Returns DDI_INTR_CLAIMED / DDI_INTR_UNCLAIMED.  MSI interrupts are
 * always claimed; fixed interrupts are unclaimed when no attention
 * bits are pending twice in a row (intr_unclaimed latch below).
 */
static uint32_t
emlxs_sli3_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint16_t msgid;
	uint32_t hc_copy;
	uint32_t ha_copy;
	uint32_t restore = 0;	/* set when HC register must be restored */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	 * "sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
	 */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (hba->flag & FC_OFFLINE_MODE) {
			mutex_exit(&EMLXS_PORT_LOCK);

			/*
			 * SBUS adapters are claimed even while offline;
			 * PCI adapters leave the shared line unclaimed.
			 */
			if (hba->bus_type == SBUS_FC) {
				return (DDI_INTR_CLAIMED);
			} else {
				return (DDI_INTR_UNCLAIMED);
			}
		}

		/* Get host attention bits */
		ha_copy = emlxs_get_attention(hba, -1);

		if (ha_copy == 0) {
			/*
			 * Second consecutive empty interrupt: stop
			 * claiming the line.
			 */
			if (hba->intr_unclaimed) {
				mutex_exit(&EMLXS_PORT_LOCK);
				return (DDI_INTR_UNCLAIMED);
			}

			hba->intr_unclaimed = 1;
		} else {
			hba->intr_unclaimed = 0;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Process the interrupt */
		emlxs_proc_attention(hba, ha_copy);

		return (DDI_INTR_CLAIMED);
	}

	/* DDI_INTR_TYPE_MSI  */
	/* DDI_INTR_TYPE_MSIX */

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id; out-of-range ids fall back to vector 0 */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}

	/*
	 * Lock order: per-vector INTR_LOCK first, then PORT_LOCK.
	 * The INTR_LOCK is held across attention processing to
	 * serialize this vector end-to-end.
	 */
	mutex_enter(&EMLXS_INTR_LOCK(msgid));

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check if adapter is offline */
	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		mutex_exit(&EMLXS_INTR_LOCK(msgid));

		/* Always claim an MSI interrupt */
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Disable interrupts associated with this msgid.
	 * Zephyr-chip workaround: mask the HC enables for vector 0
	 * while it is being serviced, and restore them afterwards.
	 */
	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
		hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
		restore = 1;
	}

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, msgid);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt (PORT_LOCK dropped; INTR_LOCK still held) */
	emlxs_proc_attention(hba, ha_copy);

	/* Restore interrupts masked above */
	if (restore) {
		mutex_enter(&EMLXS_PORT_LOCK);
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_INTR_LOCK(msgid));

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_msi_intr() */
4346 #endif /* MSI_SUPPORT */
4347 
4348 
4349 static int
emlxs_sli3_intx_intr(char * arg)4350 emlxs_sli3_intx_intr(char *arg)
4351 {
4352 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4353 	uint32_t ha_copy = 0;
4354 
4355 	mutex_enter(&EMLXS_PORT_LOCK);
4356 
4357 	if (hba->flag & FC_OFFLINE_MODE) {
4358 		mutex_exit(&EMLXS_PORT_LOCK);
4359 
4360 		if (hba->bus_type == SBUS_FC) {
4361 			return (DDI_INTR_CLAIMED);
4362 		} else {
4363 			return (DDI_INTR_UNCLAIMED);
4364 		}
4365 	}
4366 
4367 	/* Get host attention bits */
4368 	ha_copy = emlxs_get_attention(hba, -1);
4369 
4370 	if (ha_copy == 0) {
4371 		if (hba->intr_unclaimed) {
4372 			mutex_exit(&EMLXS_PORT_LOCK);
4373 			return (DDI_INTR_UNCLAIMED);
4374 		}
4375 
4376 		hba->intr_unclaimed = 1;
4377 	} else {
4378 		hba->intr_unclaimed = 0;
4379 	}
4380 
4381 	mutex_exit(&EMLXS_PORT_LOCK);
4382 
4383 	/* Process the interrupt */
4384 	emlxs_proc_attention(hba, ha_copy);
4385 
4386 	return (DDI_INTR_CLAIMED);
4387 
4388 } /* emlxs_sli3_intx_intr() */
4389 
4390 
/*
 * Read, filter, and acknowledge the host attention (HA) bits.
 *
 *   msgid ==  0 : default MSI vector; non-default MSI bits filtered out
 *   msgid == -1 : polled or fixed interrupt; full HA register used
 *   msgid  >  0 : mapped MSI vector; attention bits taken from intr_map
 *
 * Bits whose corresponding enable is clear in the cached HC register
 * (hc_copy) are suppressed.  All surviving bits except ERATT and LATT
 * are written back to the HA register to clear them in hardware.
 *
 * EMLXS_PORT_LOCK must be held when calling this routine.
 */
static uint32_t
emlxs_get_attention(emlxs_hba_t *hba, int32_t msgid)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy = 0;
	uint32_t ha_copy2;
	uint32_t mask = hba->sli.sli3.hc_copy;

#ifdef MSI_SUPPORT

/* NOTE(review): no goto in this function targets this label */
read_ha_register:

	/* Check for default MSI interrupt */
	if (msgid == 0) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

		/* Filter out MSI non-default attention bits */
		ha_copy2 &= ~(hba->intr_cond);
	}

	/* Check for polled or fixed type interrupt */
	else if (msgid == -1) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
	}

	/* Otherwise, assume a mapped MSI interrupt */
	else {
		/* Convert MSI msgid to mapped attention bits */
		ha_copy2 = hba->intr_map[msgid];
	}

#else /* !MSI_SUPPORT */

	/* Read host attention register to determine interrupt source */
	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

#endif /* MSI_SUPPORT */

	/* Check if Hardware error interrupt is enabled */
	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
		ha_copy2 &= ~HA_ERATT;
	}

	/* Check if link interrupt is enabled */
	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
		ha_copy2 &= ~HA_LATT;
	}

	/* Check if Mailbox interrupt is enabled */
	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
		ha_copy2 &= ~HA_MBATT;
	}

	/* Check if ring0 interrupt is enabled */
	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
		ha_copy2 &= ~HA_R0ATT;
	}

	/* Check if ring1 interrupt is enabled */
	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
		ha_copy2 &= ~HA_R1ATT;
	}

	/* Check if ring2 interrupt is enabled */
	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
		ha_copy2 &= ~HA_R2ATT;
	}

	/* Check if ring3 interrupt is enabled */
	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
		ha_copy2 &= ~HA_R3ATT;
	}

	/* Accumulate attention bits */
	ha_copy |= ha_copy2;

	/* Clear attentions except for error, link, and autoclear(MSIX) */
	ha_copy2 &= ~(HA_ERATT | HA_LATT);	/* | hba->intr_autoClear */

	/* Acknowledge the serviced bits back to the HA register */
	if (ha_copy2) {
		WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	return (ha_copy);

} /* emlxs_get_attention() */
4487 
4488 
/*
 * Dispatch pre-filtered host attention bits to their handlers.
 *
 * Order matters: an adapter error (HA_ERATT) aborts all further
 * processing; otherwise mailbox, link, and ring 0-3 events are
 * handled in sequence.  ha_copy must already have been filtered by
 * emlxs_get_attention().
 */
static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "proc_attention: ha_copy=%x", ha_copy);
	 */

	/* Ignore attentions until the adapter is at least warm-started */
	if (hba->state < FC_WARM_START) {
		return;
	}

	if (!ha_copy) {
		return;
	}

	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/* Adapter error - handle and stop; nothing else is trustworthy */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
		emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
	}

	/* event on ring 3 - CT Ring */
	if (ha_copy & HA_R3ATT) {
		HBASTATS.IntrEvent[3]++;
		emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
	}

	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
	}

	/* Set heartbeat flag to show activity */
	hba->heartbeat_flag = 1;

#ifdef FMA_SUPPORT
	if (hba->bus_type == SBUS_FC) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
	}
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_proc_attention() */
4575 
4576 
4577 /*
4578  * emlxs_handle_ff_error()
4579  *
4580  *    Description: Processes a FireFly error
4581  *    Runs at Interrupt level
4582  */
4583 static void
emlxs_handle_ff_error(emlxs_hba_t * hba)4584 emlxs_handle_ff_error(emlxs_hba_t *hba)
4585 {
4586 	emlxs_port_t *port = &PPORT;
4587 	uint32_t status;
4588 	uint32_t status1;
4589 	uint32_t status2;
4590 	int i = 0;
4591 
4592 	/* do what needs to be done, get error from STATUS REGISTER */
4593 	status = READ_CSR_REG(hba, FC_HS_REG(hba));
4594 
4595 	/* Clear Chip error bit */
4596 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);
4597 
4598 	/* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
4599 	if (status & HS_FFER1) {
4600 
4601 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4602 		    "HS_FFER1 received");
4603 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4604 		(void) emlxs_offline(hba, 1);
4605 		while ((status & HS_FFER1) && (i < 300)) {
4606 			status =
4607 			    READ_CSR_REG(hba, FC_HS_REG(hba));
4608 			BUSYWAIT_MS(1000);
4609 			i++;
4610 		}
4611 	}
4612 
4613 	if (i == 300) {
4614 		/* 5 minutes is up, shutdown HBA */
4615 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4616 		    "HS_FFER1 clear timeout");
4617 
4618 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4619 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
4620 
4621 		goto done;
4622 	}
4623 
4624 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4625 	    "HS_FFER1 cleared");
4626 
4627 	if (status & HS_OVERTEMP) {
4628 		status1 =
4629 		    READ_SLIM_ADDR(hba,
4630 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));
4631 
4632 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4633 		    "Maximum adapter temperature exceeded (%d �C).", status1);
4634 
4635 		hba->temperature = status1;
4636 		hba->flag |= FC_OVERTEMP_EVENT;
4637 
4638 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4639 		emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4640 		    NULL, NULL);
4641 
4642 	} else {
4643 		status1 =
4644 		    READ_SLIM_ADDR(hba,
4645 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
4646 		status2 =
4647 		    READ_SLIM_ADDR(hba,
4648 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));
4649 
4650 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4651 		    "Host Error Attention: "
4652 		    "status=0x%x status1=0x%x status2=0x%x",
4653 		    status, status1, status2);
4654 
4655 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4656 
4657 		if (status & HS_FFER6) {
4658 			emlxs_thread_spawn(hba, emlxs_restart_thread,
4659 			    NULL, NULL);
4660 		} else {
4661 			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4662 			    NULL, NULL);
4663 		}
4664 	}
4665 
4666 done:
4667 #ifdef FMA_SUPPORT
4668 	/* Access handle validation */
4669 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
4670 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4671 #endif  /* FMA_SUPPORT */
4672 
4673 	return;
4674 
4675 } /* emlxs_handle_ff_error() */
4676 
4677 
/*
 *  emlxs_sli3_handle_link_event()
 *
 *    Description: Process a Link Attention (HA_LATT).  Declares the
 *    link down, then issues a READ_LA mailbox command to retrieve the
 *    new link state, and acknowledges the attention by writing HA_LATT
 *    back to the HA register.
 */
static void
emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	int rc;

	HBASTATS.LinkEvent++;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
	    HBASTATS.LinkEvent);

	/* Make sure link is declared down */
	emlxs_linkdown(hba);

	/*
	 * Get a buffer which will be used for mailbox commands.
	 * If none is available the attention is left pending; it is
	 * not acknowledged here.
	 */
	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		/* Get link attention message */
		if (emlxs_mb_read_la(hba, mbq) == 0) {
			/*
			 * On MBX_BUSY/MBX_SUCCESS ownership of mbq has
			 * passed to the mailbox layer; otherwise we
			 * must return it to the pool ourselves.
			 */
			rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq,
			    MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX,
				    (void *)mbq);
			}

			mutex_enter(&EMLXS_PORT_LOCK);

			/*
			 * Clear Link Attention in HA REG
			 */
			WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);

#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

			mutex_exit(&EMLXS_PORT_LOCK);
		} else {
			/* READ_LA setup failed; reclaim the mailbox buffer */
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		}
	}

} /* emlxs_sli3_handle_link_event()  */
4728 
4729 
/*
 *  emlxs_sli3_handle_ring_event()
 *
 *    Description: Process a Ring Attention.  Drains completed IOCBs
 *    from the ring's response queue, completing polled/allocated
 *    packets inline and queueing the rest to the channel's interrupt
 *    thread.  Finally updates the adapter's response get-index and
 *    acknowledges the chip attention bits.
 */
static void
emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
    uint32_t ha_copy)
{
	emlxs_port_t *port = &PPORT;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	CHANNEL *cp;
	RING *rp;
	IOCB *entry;
	IOCBQ *iocbq;
	IOCBQ local_iocbq;	/* scratch IOCB for untracked completions */
	PGP *pgp;
	uint32_t count;		/* number of response entries consumed */
	volatile uint32_t chipatt;
	void *ioa2;
	uint32_t reg;		/* this ring's attention bits, normalized */
	uint32_t channel_no;
	off_t offset;
	IOCBQ *rsp_head = NULL;	/* local list of deferred completions */
	IOCBQ *rsp_tail = NULL;
	emlxs_buf_t *sbp = NULL;

	count = 0;
	rp = &hba->sli.sli3.ring[ring_no];
	cp = rp->channelp;
	channel_no = cp->channelno;

	/*
	 * Isolate this ring's host attention bits
	 * This makes all ring attention bits equal
	 * to Ring0 attention bits
	 */
	reg = (ha_copy >> (ring_no * 4)) & 0x0f;

	/*
	 * Gather iocb entries off response ring.
	 * Ensure entry is owned by the host.
	 */
	pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
	    (uint64_t)((unsigned long)slim2p));
	/* Sync the adapter's put-index before reading it */
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);

	/* While ring is not empty */
	while (rp->fc_rspidx != rp->fc_port_rspidx) {
		HBASTATS.IocbReceived[channel_no]++;

		/* Get the next response ring iocb */
		entry =
		    (IOCB *)(((char *)rp->fc_rspringaddr +
		    (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));

		/* DMA sync the response ring iocb for the adapter */
		offset = (off_t)((uint64_t)((unsigned long)entry)
		    - (uint64_t)((unsigned long)slim2p));
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
		    hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);

		count++;

		/*
		 * Copy word6 and word7 to local iocb for now -
		 * enough to read ULPLE, ULPCOMMAND, ULPSTATUS, ULPIOTAG
		 * before deciding where the full entry should go.
		 */
		iocbq = &local_iocbq;

		BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
		    (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
		    (sizeof (uint32_t) * 2));

		/* when LE is not set, entire Command has not been received */
		if (!iocbq->iocb.ULPLE) {
			/* This should never happen */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
			    "ulpLE is not set. "
			    "ring=%d iotag=%d cmd=%x status=%x",
			    channel_no, iocbq->iocb.ULPIOTAG,
			    iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);

			goto next;
		}

		/*
		 * Look up the tracked packet (sbp) for commands the
		 * driver registered by iotag; all other commands
		 * complete through the local scratch iocbq.
		 */
		sbp = NULL;
		switch (iocbq->iocb.ULPCOMMAND) {
#ifdef SFCT_SUPPORT
		case CMD_CLOSE_XRI_CX:
		case CMD_CLOSE_XRI_CN:
		case CMD_ABORT_XRI_CX:
			if (port->mode == MODE_TARGET) {
				sbp = emlxs_unregister_pkt(cp,
				    iocbq->iocb.ULPIOTAG, 0);
			}
			break;
#endif /* SFCT_SUPPORT */

			/* Ring 0 registered commands */
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
#ifdef SFCT_SUPPORT
		case CMD_FCP_TSEND_CX:
		case CMD_FCP_TSEND64_CX:
		case CMD_FCP_TRECEIVE_CX:
		case CMD_FCP_TRECEIVE64_CX:
		case CMD_FCP_TRSP_CX:
		case CMD_FCP_TRSP64_CX:
#endif /* SFCT_SUPPORT */

			/* Ring 1 registered commands */
		case CMD_XMIT_BCAST_CN:
		case CMD_XMIT_BCAST_CX:
		case CMD_XMIT_SEQUENCE_CX:
		case CMD_XMIT_SEQUENCE_CR:
		case CMD_XMIT_BCAST64_CN:
		case CMD_XMIT_BCAST64_CX:
		case CMD_XMIT_SEQUENCE64_CX:
		case CMD_XMIT_SEQUENCE64_CR:
		case CMD_CREATE_XRI_CR:
		case CMD_CREATE_XRI_CX:

			/* Ring 2 registered commands */
		case CMD_ELS_REQUEST_CR:
		case CMD_ELS_REQUEST_CX:
		case CMD_XMIT_ELS_RSP_CX:
		case CMD_ELS_REQUEST64_CR:
		case CMD_ELS_REQUEST64_CX:
		case CMD_XMIT_ELS_RSP64_CX:

			/* Ring 3 registered commands */
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:

			sbp =
			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
			break;
		}

		/* If packet is stale, then drop it. */
		if (sbp == STALE_PACKET) {
			cp->hbaCmplCmd_sbp++;
			/* Copy entry to the local iocbq */
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
			    "channelno=%d iocb=%p cmd=%x status=%x "
			    "error=%x iotag=%d context=%x info=%x",
			    channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
			    iocbq->iocb.ULPSTATUS,
			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
			    (uint16_t)iocbq->iocb.ULPIOTAG,
			    (uint16_t)iocbq->iocb.ULPCONTEXT,
			    (uint8_t)iocbq->iocb.ULPRSVDBYTE);

			goto next;
		}

		/*
		 * If a packet was found, then queue the packet's
		 * iocb for deferred processing
		 */
		else if (sbp) {
#ifdef SFCT_SUPPORT
			fct_cmd_t *fct_cmd;
			emlxs_buf_t *cmd_sbp;

			fct_cmd = sbp->fct_cmd;
			if (fct_cmd) {
				cmd_sbp =
				    (emlxs_buf_t *)fct_cmd->cmd_fca_private;
				mutex_enter(&cmd_sbp->fct_mtx);
				EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
				    EMLXS_FCT_IOCB_COMPLETE);
				mutex_exit(&cmd_sbp->fct_mtx);
			}
#endif /* SFCT_SUPPORT */
			cp->hbaCmplCmd_sbp++;
			atomic_dec_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_dec_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */

			/* Copy entry to sbp's iocbq */
			iocbq = &sbp->iocbq;
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			iocbq->next = NULL;

			/*
			 * If this is NOT a polled command completion
			 * or a driver allocated pkt, then defer pkt
			 * completion.
			 */
			if (!(sbp->pkt_flags &
			    (PACKET_POLLED | PACKET_ALLOCATED))) {
				/* Add the IOCB to the local list */
				if (!rsp_head) {
					rsp_head = iocbq;
				} else {
					rsp_tail->next = iocbq;
				}

				rsp_tail = iocbq;

				goto next;
			}
		} else {
			/* Untracked completion: process via scratch iocbq */
			cp->hbaCmplCmd++;
			/* Copy entry to the local iocbq */
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			iocbq->next = NULL;
			iocbq->bp = NULL;
			iocbq->port = &PPORT;
			iocbq->channel = cp;
			iocbq->node = NULL;
			iocbq->sbp = NULL;
			iocbq->flag = 0;
		}

		/* process the channel event now (inline, not deferred) */
		emlxs_proc_channel_event(hba, cp, iocbq);

next:
		/* Increment the driver's local response get index */
		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
			rp->fc_rspidx = 0;
		}

	}	/* while (TRUE) */

	/* Hand the deferred completions to the channel's intr thread */
	if (rsp_head) {
		mutex_enter(&cp->rsp_lock);
		if (cp->rsp_head == NULL) {
			cp->rsp_head = rsp_head;
			cp->rsp_tail = rsp_tail;
		} else {
			cp->rsp_tail->next = rsp_head;
			cp->rsp_tail = rsp_tail;
		}
		mutex_exit(&cp->rsp_lock);

		emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
	}

	/* Check if at least one response entry was processed */
	if (count) {
		/* Update response get index for the adapter */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channel_no].rspGetInx
			    = BE_SWAP32(rp->fc_rspidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channel_no].rspGetInx))
			    - (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			/* PCI: get-index lives in the host group pointer
			 * area of SLIM */
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
			    1) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_rspidx);
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */
		}

		if (reg & HA_R0RE_REQ) {
			/* HBASTATS.chipRingFree++; */

			mutex_enter(&EMLXS_PORT_LOCK);

			/* Tell the adapter we serviced the ring */
			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
			    (channel_no * 4));
			WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);

#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
		/* HBASTATS.hostRingFree++; */

		/* Cmd ring may be available. Try sending more iocbs */
		emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
	}

	/* HBASTATS.ringEvent++; */

	return;

} /* emlxs_sli3_handle_ring_event() */
5052 
5053 
/*
 * Handle an unsolicited receive-sequence IOCB.
 *
 * Resolves the receive buffer (HBQ entry or posted-buffer address),
 * selects the owning virtual port for NPIV traffic, and dispatches
 * the payload to the FCT/IP/ELS/CT unsolicited handler for the
 * channel.  On any failure the buffer is dropped, per-ring error
 * counters are bumped, and (for the FCT ring) a LOGO is sent to the
 * remote port.  Replacement buffers are reposted in the done: path.
 *
 * Returns 0 on normal completion, 1 for an unrecognized channel.
 */
extern int
emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	IOCB *iocb;
	RING *rp;
	MATCHMAP *mp = NULL;
	uint64_t bdeAddr;
	uint32_t vpi = 0;
	uint32_t channelno;
	uint32_t size = 0;
	uint32_t *RcvError;	/* per-ring hard-failure counter */
	uint32_t *RcvDropped;	/* per-ring dropped-frame counter */
	uint32_t *UbPosted;	/* per-ring posted-buffer counter */
	emlxs_msg_t *dropped_msg;
	char error_str[64];
	uint32_t buf_type;	/* memory pool the buffer returns to */
	uint32_t *word;

	channelno = cp->channelno;
	rp = &hba->sli.sli3.ring[channelno];

	iocb = &iocbq->iocb;
	word = (uint32_t *)iocb;

	/* Bind the ring-specific counters, message, and buffer pool */
	switch (channelno) {
#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		HBASTATS.FctRingEvent++;
		RcvError = &HBASTATS.FctRingError;
		RcvDropped = &HBASTATS.FctRingDropped;
		UbPosted = &HBASTATS.FctUbPosted;
		dropped_msg = &emlxs_fct_detail_msg;
		buf_type = MEM_FCTBUF;
		break;
#endif /* SFCT_SUPPORT */

	case FC_IP_RING:
		HBASTATS.IpRcvEvent++;
		RcvError = &HBASTATS.IpDropped;
		RcvDropped = &HBASTATS.IpDropped;
		UbPosted = &HBASTATS.IpUbPosted;
		dropped_msg = &emlxs_unsol_ip_dropped_msg;
		buf_type = MEM_IPBUF;
		break;

	case FC_ELS_RING:
		HBASTATS.ElsRcvEvent++;
		RcvError = &HBASTATS.ElsRcvError;
		RcvDropped = &HBASTATS.ElsRcvDropped;
		UbPosted = &HBASTATS.ElsUbPosted;
		dropped_msg = &emlxs_unsol_els_dropped_msg;
		buf_type = MEM_ELSBUF;
		break;

	case FC_CT_RING:
		HBASTATS.CtRcvEvent++;
		RcvError = &HBASTATS.CtRcvError;
		RcvDropped = &HBASTATS.CtRcvDropped;
		UbPosted = &HBASTATS.CtUbPosted;
		dropped_msg = &emlxs_unsol_ct_dropped_msg;
		buf_type = MEM_CTBUF;
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
		    "channel=%d cmd=%x  %s %x %x %x %x",
		    channelno, iocb->ULPCOMMAND,
		    emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
		    word[6], word[7]);
		return (1);
	}

	/* Classify any adapter-reported receive error */
	if (iocb->ULPSTATUS) {
		if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
		    (iocb->un.grsp.perr.statLocalError ==
		    IOERR_RCV_BUFFER_TIMEOUT)) {
			(void) strlcpy(error_str, "Out of posted buffers:",
			    sizeof (error_str));
			/* No buffer attached; nothing to repost */
			iocb->ULPBDECOUNT = 0;
		} else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
		    (iocb->un.grsp.perr.statLocalError ==
		    IOERR_RCV_BUFFER_WAITING)) {
			(void) strlcpy(error_str, "Buffer waiting:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
			goto done;
		} else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
			(void) strlcpy(error_str, "Need Buffer Entry:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
			goto done;
		} else {
			(void) strlcpy(error_str, "General error:",
			    sizeof (error_str));
		}

		goto failed;
	}

	/* Resolve the receive buffer: HBQ tag or posted-buffer address */
	if (hba->flag & FC_HBQ_ENABLED) {
		HBQ_INIT_t *hbq;
		HBQE_t *hbqE;
		uint32_t hbqe_tag;
		uint32_t hbq_id;

		(*UbPosted)--;

		hbqE = (HBQE_t *)iocb;
		hbq_id = hbqE->unt.ext.HBQ_tag;
		hbqe_tag = hbqE->unt.ext.HBQE_tag;

		hbq = &hba->sli.sli3.hbq_table[hbq_id];

		if (hbqe_tag >= hbq->HBQ_numEntries) {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid HBQE iotag=%d:", hbqe_tag);
			goto dropped;
		}

		mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];

		size = iocb->unsli3.ext_rcv.seq_len;
	} else {
		bdeAddr =
		    PADDR(iocb->un.cont64[0].addrHigh,
		    iocb->un.cont64[0].addrLow);

		/* Check for invalid buffer */
		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
			(void) strlcpy(error_str, "Invalid buffer:",
			    sizeof (error_str));
			goto dropped;
		}

		/* Map the DMA address back to its MATCHMAP descriptor */
		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);

		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
	}

	if (!mp) {
		(void) strlcpy(error_str, "Buffer not mapped:",
		    sizeof (error_str));
		goto dropped;
	}

#ifdef FMA_SUPPORT
	if (mp->dma_handle) {
		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "handle_rcv_seq: hdl=%p",
			    mp->dma_handle);
			goto dropped;
		}
	}
#endif  /* FMA_SUPPORT */

	if (!size) {
		(void) strlcpy(error_str, "Buffer empty:", sizeof (error_str));
		goto dropped;
	}

	/* To avoid we drop the broadcast packets */
	if (channelno != FC_IP_RING) {
		/* Get virtual port */
		if (hba->flag & FC_NPIV_ENABLED) {
			vpi = iocb->unsli3.ext_rcv.vpi;
			if (vpi >= hba->vpi_max) {
				(void) snprintf(error_str, sizeof (error_str),
				    "Invalid VPI=%d:", vpi);
				goto dropped;
			}

			port = &VPORT(vpi);
		}
	}

	/* Process request: route to the mode-appropriate handler */
	switch (channelno) {
	case FC_FCT_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) strlcpy(error_str, "Target mode disabled:",
			    sizeof (error_str));
			goto dropped;
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp,
			    size);
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_IP_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) emlxs_ip_handle_unsol_req(port, cp, iocbq,
			    mp, size);
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) strlcpy(error_str, "Initiator mode disabled:",
			    sizeof (error_str));
			goto dropped;
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_ELS_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
			    size);
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
			    size);
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_CT_RING:
		(void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
		break;
	}

	goto done;

dropped:
	(*RcvDropped)++;

	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
	    "%s: cmd=%x  %s %x %x %x %x",
	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
	    word[4], word[5], word[6], word[7]);

	/* Dropped target-mode traffic: LOGO the remote port */
	if (channelno == FC_FCT_RING) {
		uint32_t sid;

		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
			emlxs_node_t *ndlp;
			/* NOTE(review): ULPIOTAG carries the RPI here */
			ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
			if (! ndlp) {
				goto done;
			}
			sid = ndlp->nlp_DID;
		} else {
			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
		}

		emlxs_send_logo(port, sid);
	}

	goto done;

failed:
	(*RcvError)++;

	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
	    "%s: cmd=%x %s  %x %x %x %x  hba:%x %x",
	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
	    word[4], word[5], word[6], word[7], hba->state, hba->flag);

done:

	/* Recycle the receive buffer(s) back to the adapter */
	if (hba->flag & FC_HBQ_ENABLED) {
		if (iocb->ULPBDECOUNT) {
			HBQE_t *hbqE;
			uint32_t hbq_id;

			hbqE = (HBQE_t *)iocb;
			hbq_id = hbqE->unt.ext.HBQ_tag;

			emlxs_update_HBQ_index(hba, hbq_id);
		}
	} else {
		if (mp) {
			emlxs_mem_put(hba, buf_type, (void *)mp);
		}

		if (iocb->ULPBDECOUNT) {
			(void) emlxs_post_buffer(hba, rp, 1);
		}
	}

	return (0);

} /* emlxs_handle_rcv_seq() */
5352 
5353 
/*
 * Copy one IOCB onto the adapter's command ring and advance the
 * local put index.  For ULP packets (sbp != NULL) the packet is
 * marked in-chip and the io_active counters are bumped; iocbq-only
 * commands are returned to the IOCB pool after the copy.
 *
 * EMLXS_CMD_RING_LOCK must be held when calling this function.
 */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;
	IOCB *iocb;
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		/* Mark the packet as owned by the adapter */
		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		atomic_inc_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
		if (sbp->node) {
			atomic_inc_32(&sbp->node->io_active);
		}
#endif /* NODE_THROTTLE_SUPPORT */

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

	/* Copy the local iocb to the command ring iocb (big-endian) */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
	}

	/* update local ring index to next available ring index */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */
5442 
5443 
/*
 * emlxs_sli3_hba_kill
 *
 * Shut the adapter down hard via the MBX_KILL_BOARD interlock handshake.
 * Waits for any outstanding mailbox command to drain, disables host
 * interrupts, then attempts the interlock first through the SLIM2 (host
 * memory) mailbox and, failing that, through the SLIM1 (adapter memory)
 * mailbox.  On completion the HBA state is set to FC_KILLED.
 *
 * Takes and releases EMLXS_PORT_LOCK internally.
 */
static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX swpmb;		/* local mailbox image, accessed via word0 */
	MAILBOX *mb2;		/* SLIM2 (host memory) mailbox */
	MAILBOX *mb1;		/* SLIM1 (adapter SLIM) mailbox */
	uint32_t *word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		/* Already interlocked; just record the killed state */
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Wait up to 10000 * 100us for the mailbox to go idle */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		BUSYWAIT_US(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Reserve the mailbox for the duration of the kill */
	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	word0 = (uint32_t *)&swpmb;

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

	/* 0x55555555 is the interlock pattern; the adapter signals */
	/* acceptance by rewriting it as 0xAAAAAAAA (polled below) */
	value = 0x55555555;
	*word0 = 0;
	swpmb.mbxCommand = MBX_KILL_BOARD;
	swpmb.mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), *word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			*word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb.mbxOwner == 0) {
				break;
			}

			BUSYWAIT_US(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0x55555555;
	*word0 = 0;
	swpmb.mbxCommand = MBX_KILL_BOARD;
	swpmb.mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), *word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			*word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb.mbxOwner == 0) {
				break;
			}

			BUSYWAIT_US(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been killed
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */
5677 
5678 
/*
 * emlxs_sli3_hba_kill4quiesce
 *
 * Streamlined variant of emlxs_sli3_hba_kill() used during quiesce:
 * disables host interrupts and issues a single MBX_KILL_BOARD interlock
 * through both SLIM mailboxes, without the drain/retry machinery.
 * Unconditionally marks the HBA FC_KILLED when finished.
 *
 * NOTE(review): unlike emlxs_sli3_hba_kill(), no locking is taken here;
 * presumably the caller guarantees exclusivity during quiesce - verify
 * against callers.
 */
static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX swpmb;		/* local mailbox image, accessed via word0 */
	MAILBOX *mb2;		/* SLIM2 (host memory) mailbox */
	MAILBOX *mb1;		/* SLIM1 (adapter SLIM) mailbox */
	uint32_t *word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	word0 = (uint32_t *)&swpmb;

	/* 0x55555555 is the interlock pattern; the adapter signals */
	/* acceptance by rewriting it as 0xAAAAAAAA (polled below) */
	value = 0x55555555;
	*word0 = 0;
	swpmb.mbxCommand = MBX_KILL_BOARD;
	swpmb.mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), *word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}
		BUSYWAIT_US(50);
	}
	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			*word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb.mbxOwner == 0) {
				break;
			}
			BUSYWAIT_US(50);
		}
		goto done;
	}

done:
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */
5762 
5763 
5764 
5765 
5766 /*
5767  * emlxs_handle_mb_event
5768  *
5769  * Description: Process a Mailbox Attention.
5770  * Called from host_interrupt to process MBATT
5771  *
5772  *   Returns:
5773  *
5774  */
5775 static uint32_t
emlxs_handle_mb_event(emlxs_hba_t * hba)5776 emlxs_handle_mb_event(emlxs_hba_t *hba)
5777 {
5778 	emlxs_port_t		*port = &PPORT;
5779 	MAILBOX			*mb;
5780 	MAILBOX			swpmb;
5781 	MAILBOX			*mbox;
5782 	MAILBOXQ		*mbq = NULL;
5783 	uint32_t		*word0;
5784 	MATCHMAP		*mbox_bp;
5785 	off_t			offset;
5786 	uint32_t		i;
5787 	int			rc;
5788 
5789 	word0 = (uint32_t *)&swpmb;
5790 
5791 	mutex_enter(&EMLXS_PORT_LOCK);
5792 	switch (hba->mbox_queue_flag) {
5793 	case 0:
5794 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5795 		    "No mailbox active.");
5796 
5797 		mutex_exit(&EMLXS_PORT_LOCK);
5798 		return (0);
5799 
5800 	case MBX_POLL:
5801 
5802 		/* Mark mailbox complete, this should wake up any polling */
5803 		/* threads. This can happen if interrupts are enabled while */
5804 		/* a polled mailbox command is outstanding. If we don't set */
5805 		/* MBQ_COMPLETED here, the polling thread may wait until */
5806 		/* timeout error occurs */
5807 
5808 		mutex_enter(&EMLXS_MBOX_LOCK);
5809 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5810 		if (mbq) {
5811 			port = (emlxs_port_t *)mbq->port;
5812 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5813 			    "Mailbox event. Completing Polled command.");
5814 			mbq->flag |= MBQ_COMPLETED;
5815 		}
5816 		mutex_exit(&EMLXS_MBOX_LOCK);
5817 
5818 		mutex_exit(&EMLXS_PORT_LOCK);
5819 		return (0);
5820 
5821 	case MBX_SLEEP:
5822 	case MBX_NOWAIT:
5823 		/* Check mbox_timer, it acts as a service flag too */
5824 		/* The first to service the mbox queue will clear the timer */
5825 		if (hba->mbox_timer) {
5826 			hba->mbox_timer = 0;
5827 
5828 			mutex_enter(&EMLXS_MBOX_LOCK);
5829 			mbq = (MAILBOXQ *)hba->mbox_mbq;
5830 			mutex_exit(&EMLXS_MBOX_LOCK);
5831 		}
5832 
5833 		if (!mbq) {
5834 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5835 			    "Mailbox event. No service required.");
5836 			mutex_exit(&EMLXS_PORT_LOCK);
5837 			return (0);
5838 		}
5839 
5840 		mb = (MAILBOX *)mbq;
5841 		mutex_exit(&EMLXS_PORT_LOCK);
5842 		break;
5843 
5844 	default:
5845 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5846 		    "Invalid Mailbox flag (%x).");
5847 
5848 		mutex_exit(&EMLXS_PORT_LOCK);
5849 		return (0);
5850 	}
5851 
5852 	/* Set port context */
5853 	port = (emlxs_port_t *)mbq->port;
5854 
5855 	/* Get first word of mailbox */
5856 	if (hba->flag & FC_SLIM2_MODE) {
5857 		mbox = FC_SLIM2_MAILBOX(hba);
5858 		offset = (off_t)((uint64_t)((unsigned long)mbox)
5859 		    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5860 
5861 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5862 		    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5863 		*word0 = *((volatile uint32_t *)mbox);
5864 		*word0 = BE_SWAP32(*word0);
5865 	} else {
5866 		mbox = FC_SLIM1_MAILBOX(hba);
5867 		*word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5868 	}
5869 
5870 	i = 0;
5871 	while (swpmb.mbxOwner == OWN_CHIP) {
5872 		if (i++ > 10000) {
5873 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5874 			    "OWN_CHIP: %s: status=%x",
5875 			    emlxs_mb_cmd_xlate(swpmb.mbxCommand),
5876 			    swpmb.mbxStatus);
5877 
5878 			return (1);
5879 		}
5880 
5881 		/* Get first word of mailbox */
5882 		if (hba->flag & FC_SLIM2_MODE) {
5883 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5884 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5885 			*word0 = *((volatile uint32_t *)mbox);
5886 			*word0 = BE_SWAP32(*word0);
5887 		} else {
5888 			*word0 =
5889 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5890 		}
5891 	}
5892 
5893 	/* Now that we are the owner, DMA Sync entire mailbox if needed */
5894 	if (hba->flag & FC_SLIM2_MODE) {
5895 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5896 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
5897 
5898 		BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
5899 		    MAILBOX_CMD_BSIZE);
5900 	} else {
5901 		READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
5902 		    MAILBOX_CMD_WSIZE);
5903 	}
5904 
5905 #ifdef MBOX_EXT_SUPPORT
5906 	if (mbq->extbuf) {
5907 		uint32_t *mbox_ext =
5908 		    (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
5909 		off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
5910 
5911 		if (hba->flag & FC_SLIM2_MODE) {
5912 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5913 			    offset_ext, mbq->extsize,
5914 			    DDI_DMA_SYNC_FORKERNEL);
5915 			BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
5916 			    (uint8_t *)mbq->extbuf, mbq->extsize);
5917 		} else {
5918 			READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
5919 			    mbox_ext, (mbq->extsize / 4));
5920 		}
5921 	}
5922 #endif /* MBOX_EXT_SUPPORT */
5923 
5924 #ifdef FMA_SUPPORT
5925 	if (!(hba->flag & FC_SLIM2_MODE)) {
5926 		/* Access handle validation */
5927 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5928 	}
5929 #endif  /* FMA_SUPPORT */
5930 
5931 	/* Now sync the memory buffer if one was used */
5932 	if (mbq->bp) {
5933 		mbox_bp = (MATCHMAP *)mbq->bp;
5934 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5935 		    DDI_DMA_SYNC_FORKERNEL);
5936 	}
5937 
5938 	/* Mailbox has been completely received at this point */
5939 
5940 	if (mb->mbxCommand == MBX_HEARTBEAT) {
5941 		hba->heartbeat_active = 0;
5942 		goto done;
5943 	}
5944 
5945 	if (hba->mbox_queue_flag == MBX_SLEEP) {
5946 		if (swpmb.mbxCommand != MBX_DOWN_LOAD &&
5947 		    swpmb.mbxCommand != MBX_DUMP_MEMORY) {
5948 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5949 			    "Received.  %s: status=%x Sleep.",
5950 			    emlxs_mb_cmd_xlate(swpmb.mbxCommand),
5951 			    swpmb.mbxStatus);
5952 		}
5953 	} else {
5954 		if (swpmb.mbxCommand != MBX_DOWN_LOAD &&
5955 		    swpmb.mbxCommand != MBX_DUMP_MEMORY) {
5956 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5957 			    "Completed. %s: status=%x",
5958 			    emlxs_mb_cmd_xlate(swpmb.mbxCommand),
5959 			    swpmb.mbxStatus);
5960 		}
5961 	}
5962 
5963 	/* Filter out passthru mailbox */
5964 	if (mbq->flag & MBQ_PASSTHRU) {
5965 		goto done;
5966 	}
5967 
5968 	if (mb->mbxStatus) {
5969 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5970 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5971 		    (uint32_t)mb->mbxStatus);
5972 	}
5973 
5974 	if (mbq->mbox_cmpl) {
5975 		rc = (mbq->mbox_cmpl)(hba, mbq);
5976 		/* If mbox was retried, return immediately */
5977 		if (rc) {
5978 			return (0);
5979 		}
5980 	}
5981 
5982 done:
5983 
5984 	/* Clean up the mailbox area */
5985 	emlxs_mb_fini(hba, mb, mb->mbxStatus);
5986 
5987 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5988 	if (mbq) {
5989 		/* Attempt to send pending mailboxes */
5990 		rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5991 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5992 			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
5993 		}
5994 	}
5995 	return (0);
5996 
5997 } /* emlxs_handle_mb_event() */
5998 
5999 
6000 static void
emlxs_sli3_timer(emlxs_hba_t * hba)6001 emlxs_sli3_timer(emlxs_hba_t *hba)
6002 {
6003 	/* Perform SLI3 level timer checks */
6004 
6005 	emlxs_sli3_timer_check_mbox(hba);
6006 
6007 } /* emlxs_sli3_timer() */
6008 
6009 
/*
 * emlxs_sli3_timer_check_mbox
 *
 * Periodic check for a timed-out mailbox command.  If the mailbox timer
 * has expired, first look for an error attention (HA_ERATT) and hand off
 * to FF error handling; otherwise, if the mailbox actually completed but
 * the attention was missed, force a completion event.  If the command is
 * genuinely stuck, mark the HBA FC_ERROR, wake any waiters, and spawn the
 * shutdown thread.
 */
static void
emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	/* NOTE: mb is repurposed below - it points in turn at the SLIM */
	/* mailbox, then the local swpmb copy, then the queued mbox_mbq. */
	MAILBOX *mb = NULL;
	MAILBOX swpmb;
	uint32_t *word0;
	uint32_t offset;
	uint32_t ha_copy = 0;

	/* Timeout handling can be disabled by configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Mailbox timed out, first check for error attention */
	ha_copy = emlxs_check_attention(hba);

	if (ha_copy & HA_ERATT) {
		hba->mbox_timer = 0;
		mutex_exit(&EMLXS_PORT_LOCK);
		emlxs_handle_ff_error(hba);
		return;
	}

	word0 = (uint32_t *)&swpmb;

	if (hba->mbox_queue_flag) {
		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mb = FC_SLIM2_MAILBOX(hba);
			offset =
			    (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
			    ((unsigned long)hba->sli.sli3.slim2.virt));

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			*word0 = *((volatile uint32_t *)mb);
			*word0 = BE_SWAP32(*word0);
		} else {
			mb = FC_SLIM1_MAILBOX(hba);
			*word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */
		}

		/* Examine the local copy from here on */
		mb = &swpmb;

		/* Check if mailbox has actually completed */
		if (mb->mbxOwner == OWN_HOST) {
			/* Read host attention register to determine */
			/* interrupt source */
			/* (intentionally shadows the outer ha_copy) */
			uint32_t ha_copy = emlxs_check_attention(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox attention missed: %s. Forcing event. "
			    "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
			    hba->sli.sli3.hc_copy, ha_copy);

			mutex_exit(&EMLXS_PORT_LOCK);

			(void) emlxs_handle_mb_event(hba);

			return;
		}

		/* The first to service the mbox queue will clear the timer */
		/* We will service the mailbox here */
		hba->mbox_timer = 0;

		mutex_enter(&EMLXS_MBOX_LOCK);
		mb = (MAILBOX *)hba->mbox_mbq;
		mutex_exit(&EMLXS_MBOX_LOCK);
	}

	/* Log the timeout, noting how the command was issued */
	if (mb) {
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);

	return;

} /* emlxs_sli3_timer_check_mbox() */
6145 
6146 
/*
 * emlxs_mb_config_port  Issue a CONFIG_PORT mailbox command
 *
 * Builds the CONFIG_PORT mailbox in mbq and initializes the Port Control
 * Block (PCB) inside the SLIM2 host-memory area: mailbox address, host
 * and port get/put pointer areas, and the command/response descriptors
 * for all four rings.  Also records the requested sli_mode and NPIV
 * capability in the hba.  Always returns 0; the command itself is issued
 * by the caller.
 *
 * NOTE: 'slim' and 'mbox' remain NULL throughout; they are only used as
 * the base of &(slim->member) expressions to compute structure offsets
 * that are added to the slim2 physical address.
 */
static uint32_t
emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
    uint32_t hbainit)
{
	MAILBOX		*mb = (MAILBOX *)mbq;
	emlxs_vpd_t	*vpd = &VPD;
	emlxs_port_t	*port = &PPORT;
	emlxs_config_t	*cfg;
	RING		*rp;
	uint64_t	pcb;	/* physical address of the PCB */
	uint64_t	mbx;	/* physical address of the SLIM2 mailbox */
	uint64_t	hgp;	/* host get/put pointer area (SBUS only) */
	uint64_t	pgp;	/* port get/put pointer area */
	uint64_t	rgp;	/* per-ring IOCB area */
	MAILBOX		*mbox;
	SLIM2		*slim;
	SLI2_RDSC	*rdsc;
	uint64_t	offset;
	uint32_t	Laddr;
	uint32_t	i;

	cfg = &CFG;
	bzero((void *)mb, MAILBOX_CMD_BSIZE);
	mbox = NULL;
	slim = NULL;

	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL;

	mb->un.varCfgPort.pcbLen = sizeof (PCB);
	mb->un.varCfgPort.hbainit[0] = hbainit;

	/* Tell the adapter where the PCB lives in host memory */
	pcb = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->pcb));
	mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
	mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);

	/* Set Host pointers in SLIM flag */
	mb->un.varCfgPort.hps = 1;

	/* Initialize hba structure for assumed default SLI2 mode */
	/* If config port succeeds, then we will update it then   */
	hba->sli_mode = sli_mode;
	hba->vpi_max = 0;
	hba->flag &= ~FC_NPIV_ENABLED;

	if (sli_mode == EMLXS_HBA_SLI3_MODE) {
		mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
		mb->un.varCfgPort.cerbm = 1;	/* enable HBQs (ERBM) */
		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;

		/* NPIV needs firmware feature level >= 9 */
		if (cfg[CFG_NPIV_ENABLE].current) {
			if (vpd->feaLevelHigh >= 0x09) {
				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS - 1;
				} else {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS_LIMITED - 1;
				}

				mb->un.varCfgPort.cmv = 1;
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_debug_msg,
				    "CFGPORT: Firmware does not support NPIV. "
				    "level=%d", vpd->feaLevelHigh);
			}

		}
	}

	/*
	 * Now setup pcb
	 */
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
	    (hba->sli.sli3.ring_count - 1);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
	    sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;

	mbx = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->mbx));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);


	/*
	 * Set up HGP - Port Memory
	 *
	 * CR0Put   - SLI2(no HBQs) =	0xc0, With HBQs =	0x80
	 * RR0Get			0xc4			0x84
	 * CR1Put			0xc8			0x88
	 * RR1Get			0xcc			0x8c
	 * CR2Put			0xd0			0x90
	 * RR2Get			0xd4			0x94
	 * CR3Put			0xd8			0x98
	 * RR3Get			0xdc			0x9c
	 *
	 * Reserved			0xa0-0xbf
	 *
	 * If HBQs configured:
	 * HBQ 0 Put ptr  0xc0
	 * HBQ 1 Put ptr  0xc4
	 * HBQ 2 Put ptr  0xc8
	 * ...
	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
	 */

	if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
		/* ERBM is enabled */
		hba->sli.sli3.hgp_ring_offset = 0x80;
		hba->sli.sli3.hgp_hbq_offset = 0xC0;

		hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;

	} else { /* SLI2 */
		/* ERBM is disabled */
		hba->sli.sli3.hgp_ring_offset = 0xC0;
		hba->sli.sli3.hgp_hbq_offset = 0;

		hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
	}

	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
	if (hba->bus_type == SBUS_FC) {
		hgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(mbox->us.s2.host));
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    PADDR_HI(hgp);
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    PADDR_LO(hgp);
	} else {
		/* PCI: host group pointers live in SLIM (BAR0/BAR1) */
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    (uint32_t)ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));

		Laddr =
		    ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
		Laddr &= ~0x4;	/* mask off the BAR type/indicator bits */
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);

#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

	}

	pgp = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(mbox->us.s2.port));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
	    PADDR_HI(pgp);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
	    PADDR_LO(pgp);

	/* Carve the IOCB area into per-ring command/response segments */
	offset = 0;
	for (i = 0; i < 4; i++) {
		rp = &hba->sli.sli3.ring[i];
		rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];

		/* Setup command ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->cmdAddrHigh = PADDR_HI(rgp);
		rdsc->cmdAddrLow = PADDR_LO(rgp);
		rdsc->cmdEntries = rp->fc_numCiocb;

		rp->fc_cmdringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;

		/* Setup response ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->rspAddrHigh = PADDR_HI(rgp);
		rdsc->rspAddrLow = PADDR_LO(rgp);
		rdsc->rspEntries = rp->fc_numRiocb;

		rp->fc_rspringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
	}

	/* Byte-swap the PCB in place and DMA-sync it for the adapter */
	BE_SWAP32_BCOPY((uint8_t *)
	    (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    sizeof (PCB));

	offset = ((uint64_t)((unsigned long)
	    &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
	    sizeof (PCB), DDI_DMA_SYNC_FORDEV);

	return (0);

} /* emlxs_mb_config_port() */
6354 
6355 
6356 static uint32_t
emlxs_hbq_setup(emlxs_hba_t * hba,uint32_t hbq_id)6357 emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
6358 {
6359 	emlxs_port_t *port = &PPORT;
6360 	HBQ_INIT_t *hbq;
6361 	MATCHMAP *mp;
6362 	HBQE_t *hbqE;
6363 	MAILBOX *mb;
6364 	MAILBOXQ *mbq;
6365 	void *ioa2;
6366 	uint32_t j;
6367 	uint32_t count;
6368 	uint32_t size;
6369 	uint32_t ringno;
6370 	uint32_t seg;
6371 
6372 	switch (hbq_id) {
6373 	case EMLXS_ELS_HBQ_ID:
6374 		count = MEM_ELSBUF_COUNT;
6375 		size = MEM_ELSBUF_SIZE;
6376 		ringno = FC_ELS_RING;
6377 		seg = MEM_ELSBUF;
6378 		HBASTATS.ElsUbPosted = count;
6379 		break;
6380 
6381 	case EMLXS_IP_HBQ_ID:
6382 		count = MEM_IPBUF_COUNT;
6383 		size = MEM_IPBUF_SIZE;
6384 		ringno = FC_IP_RING;
6385 		seg = MEM_IPBUF;
6386 		HBASTATS.IpUbPosted = count;
6387 		break;
6388 
6389 	case EMLXS_CT_HBQ_ID:
6390 		count = MEM_CTBUF_COUNT;
6391 		size = MEM_CTBUF_SIZE;
6392 		ringno = FC_CT_RING;
6393 		seg = MEM_CTBUF;
6394 		HBASTATS.CtUbPosted = count;
6395 		break;
6396 
6397 #ifdef SFCT_SUPPORT
6398 	case EMLXS_FCT_HBQ_ID:
6399 		count = MEM_FCTBUF_COUNT;
6400 		size = MEM_FCTBUF_SIZE;
6401 		ringno = FC_FCT_RING;
6402 		seg = MEM_FCTBUF;
6403 		HBASTATS.FctUbPosted = count;
6404 		break;
6405 #endif /* SFCT_SUPPORT */
6406 
6407 	default:
6408 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6409 		    "hbq_setup: Invalid HBQ id. (%x)", hbq_id);
6410 		return (1);
6411 	}
6412 
6413 	/* Configure HBQ */
6414 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6415 	hbq->HBQ_numEntries = count;
6416 
6417 	/* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
6418 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == 0) {
6419 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6420 		    "hbq_setup: Unable to get mailbox.");
6421 		return (1);
6422 	}
6423 	mb = (MAILBOX *)mbq;
6424 
6425 	/* Allocate HBQ Host buffer and Initialize the HBQEs */
6426 	if (emlxs_hbq_alloc(hba, hbq_id)) {
6427 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6428 		    "hbq_setup: Unable to allocate HBQ.");
6429 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6430 		return (1);
6431 	}
6432 
6433 	hbq->HBQ_recvNotify = 1;
6434 	hbq->HBQ_num_mask = 0;			/* Bind to ring */
6435 	hbq->HBQ_profile = 0;			/* Selection profile */
6436 						/* 0=all, 7=logentry */
6437 	hbq->HBQ_ringMask = 1 << ringno;	/* b0100 * ringno - Binds */
6438 						/* HBQ to a ring */
6439 						/* Ring0=b0001, Ring1=b0010, */
6440 						/* Ring2=b0100 */
6441 	hbq->HBQ_headerLen = 0;			/* 0 if not profile 4 or 5 */
6442 	hbq->HBQ_logEntry = 0;			/* Set to 1 if this HBQ will */
6443 						/* be used for */
6444 	hbq->HBQ_id = hbq_id;
6445 	hbq->HBQ_PutIdx_next = 0;
6446 	hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
6447 	hbq->HBQ_GetIdx = 0;
6448 	hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
6449 	bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));
6450 
6451 	/* Fill in POST BUFFERs in HBQE */
6452 	hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
6453 	for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
6454 		/* Allocate buffer to post */
6455 		if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
6456 		    seg)) == 0) {
6457 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6458 			    "hbq_setup: Unable to allocate HBQ buffer. "
6459 			    "cnt=%d", j);
6460 			emlxs_hbq_free_all(hba, hbq_id);
6461 			return (1);
6462 		}
6463 
6464 		hbq->HBQ_PostBufs[j] = mp;
6465 
6466 		hbqE->unt.ext.HBQ_tag = hbq_id;
6467 		hbqE->unt.ext.HBQE_tag = j;
6468 		hbqE->bde.tus.f.bdeSize = size;
6469 		hbqE->bde.tus.f.bdeFlags = 0;
6470 		hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
6471 		hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
6472 		hbqE->bde.addrLow =
6473 		    BE_SWAP32(PADDR_LO(mp->phys));
6474 		hbqE->bde.addrHigh =
6475 		    BE_SWAP32(PADDR_HI(mp->phys));
6476 	}
6477 
6478 	/* Issue CONFIG_HBQ */
6479 	emlxs_mb_config_hbq(hba, mbq, hbq_id);
6480 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
6481 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6482 		    "hbq_setup: Unable to config HBQ. cmd=%x status=%x",
6483 		    mb->mbxCommand, mb->mbxStatus);
6484 
6485 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6486 		emlxs_hbq_free_all(hba, hbq_id);
6487 		return (1);
6488 	}
6489 
6490 	/* Setup HBQ Get/Put indexes */
6491 	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6492 	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6493 	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);
6494 
6495 	hba->sli.sli3.hbq_count++;
6496 
6497 	emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6498 
6499 #ifdef FMA_SUPPORT
6500 	/* Access handle validation */
6501 	if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
6502 	    != DDI_FM_OK) {
6503 		EMLXS_MSGF(EMLXS_CONTEXT,
6504 		    &emlxs_invalid_access_handle_msg, NULL);
6505 		emlxs_hbq_free_all(hba, hbq_id);
6506 		return (1);
6507 	}
6508 #endif  /* FMA_SUPPORT */
6509 
6510 	return (0);
6511 
6512 } /* emlxs_hbq_setup() */
6513 
6514 
6515 extern void
emlxs_hbq_free_all(emlxs_hba_t * hba,uint32_t hbq_id)6516 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
6517 {
6518 	HBQ_INIT_t *hbq;
6519 	MBUF_INFO *buf_info;
6520 	MBUF_INFO bufinfo;
6521 	uint32_t seg;
6522 	uint32_t j;
6523 
6524 	switch (hbq_id) {
6525 	case EMLXS_ELS_HBQ_ID:
6526 		seg = MEM_ELSBUF;
6527 		HBASTATS.ElsUbPosted = 0;
6528 		break;
6529 
6530 	case EMLXS_IP_HBQ_ID:
6531 		seg = MEM_IPBUF;
6532 		HBASTATS.IpUbPosted = 0;
6533 		break;
6534 
6535 	case EMLXS_CT_HBQ_ID:
6536 		seg = MEM_CTBUF;
6537 		HBASTATS.CtUbPosted = 0;
6538 		break;
6539 
6540 #ifdef SFCT_SUPPORT
6541 	case EMLXS_FCT_HBQ_ID:
6542 		seg = MEM_FCTBUF;
6543 		HBASTATS.FctUbPosted = 0;
6544 		break;
6545 #endif /* SFCT_SUPPORT */
6546 
6547 	default:
6548 		return;
6549 	}
6550 
6551 
6552 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6553 
6554 	if (hbq->HBQ_host_buf.virt != 0) {
6555 		for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
6556 			emlxs_mem_put(hba, seg,
6557 			    (void *)hbq->HBQ_PostBufs[j]);
6558 			hbq->HBQ_PostBufs[j] = NULL;
6559 		}
6560 		hbq->HBQ_PostBufCnt = 0;
6561 
6562 		buf_info = &bufinfo;
6563 		bzero(buf_info, sizeof (MBUF_INFO));
6564 
6565 		buf_info->size = hbq->HBQ_host_buf.size;
6566 		buf_info->virt = hbq->HBQ_host_buf.virt;
6567 		buf_info->phys = hbq->HBQ_host_buf.phys;
6568 		buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
6569 		buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
6570 		buf_info->flags = FC_MBUF_DMA;
6571 
6572 		emlxs_mem_free(hba, buf_info);
6573 
6574 		hbq->HBQ_host_buf.virt = NULL;
6575 	}
6576 
6577 	return;
6578 
6579 } /* emlxs_hbq_free_all() */
6580 
6581 
/*
 * Advance the host put index for the given HBQ after a new receive
 * buffer has been initialized, and publish the new index to the
 * adapter through SLIM so the entry becomes visible to the port.
 */
extern void
emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	void *ioa2;
	uint32_t status;
	uint32_t HBQ_PortGetIdx;
	HBQ_INIT_t *hbq;

	/* Bump the posted-buffer statistic; ignore unknown HBQ ids */
	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		HBASTATS.ElsUbPosted++;
		break;

	case EMLXS_IP_HBQ_ID:
		HBASTATS.IpUbPosted++;
		break;

	case EMLXS_CT_HBQ_ID:
		HBASTATS.CtUbPosted++;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		HBASTATS.FctUbPosted++;
		break;
#endif /* SFCT_SUPPORT */

	default:
		return;
	}

	hbq = &hba->sli.sli3.hbq_table[hbq_id];

	/* Advance the put index, wrapping at the end of the ring */
	hbq->HBQ_PutIdx =
	    (hbq->HBQ_PutIdx + 1 >=
	    hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;

	if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
		/*
		 * Ring looks full; refresh the cached get index from the
		 * port's copy in SLIM2 host memory (stored big-endian).
		 */
		HBQ_PortGetIdx =
		    BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
		    HBQ_PortGetIdx[hbq_id]);

		hbq->HBQ_GetIdx = HBQ_PortGetIdx;

		if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
			/* Still full; do not publish the new put index */
			return;
		}
	}

	/* Write the new put index into this HBQ's slot in SLIM */
	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
	status = hbq->HBQ_PutIdx;
	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_update_HBQ_index() */
6647 
6648 
6649 static void
emlxs_sli3_enable_intr(emlxs_hba_t * hba)6650 emlxs_sli3_enable_intr(emlxs_hba_t *hba)
6651 {
6652 #ifdef FMA_SUPPORT
6653 	emlxs_port_t *port = &PPORT;
6654 #endif  /* FMA_SUPPORT */
6655 	uint32_t status;
6656 
6657 	/* Enable mailbox, error attention interrupts */
6658 	status = (uint32_t)(HC_MBINT_ENA);
6659 
6660 	/* Enable ring interrupts */
6661 	if (hba->sli.sli3.ring_count >= 4) {
6662 		status |=
6663 		    (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
6664 		    HC_R0INT_ENA);
6665 	} else if (hba->sli.sli3.ring_count == 3) {
6666 		status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
6667 	} else if (hba->sli.sli3.ring_count == 2) {
6668 		status |= (HC_R1INT_ENA | HC_R0INT_ENA);
6669 	} else if (hba->sli.sli3.ring_count == 1) {
6670 		status |= (HC_R0INT_ENA);
6671 	}
6672 
6673 	hba->sli.sli3.hc_copy = status;
6674 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6675 
6676 #ifdef FMA_SUPPORT
6677 	/* Access handle validation */
6678 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6679 #endif  /* FMA_SUPPORT */
6680 
6681 } /* emlxs_sli3_enable_intr() */
6682 
6683 
6684 static void
emlxs_enable_latt(emlxs_hba_t * hba)6685 emlxs_enable_latt(emlxs_hba_t *hba)
6686 {
6687 #ifdef FMA_SUPPORT
6688 	emlxs_port_t *port = &PPORT;
6689 #endif  /* FMA_SUPPORT */
6690 
6691 	mutex_enter(&EMLXS_PORT_LOCK);
6692 	hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
6693 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6694 #ifdef FMA_SUPPORT
6695 	/* Access handle validation */
6696 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6697 #endif  /* FMA_SUPPORT */
6698 	mutex_exit(&EMLXS_PORT_LOCK);
6699 
6700 } /* emlxs_enable_latt() */
6701 
6702 
6703 static void
emlxs_sli3_disable_intr(emlxs_hba_t * hba,uint32_t att)6704 emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
6705 {
6706 #ifdef FMA_SUPPORT
6707 	emlxs_port_t *port = &PPORT;
6708 #endif  /* FMA_SUPPORT */
6709 
6710 	/* Disable all adapter interrupts */
6711 	hba->sli.sli3.hc_copy = att;
6712 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6713 #ifdef FMA_SUPPORT
6714 	/* Access handle validation */
6715 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6716 #endif  /* FMA_SUPPORT */
6717 
6718 } /* emlxs_sli3_disable_intr() */
6719 
6720 
6721 static uint32_t
emlxs_check_attention(emlxs_hba_t * hba)6722 emlxs_check_attention(emlxs_hba_t *hba)
6723 {
6724 #ifdef FMA_SUPPORT
6725 	emlxs_port_t *port = &PPORT;
6726 #endif  /* FMA_SUPPORT */
6727 	uint32_t ha_copy;
6728 
6729 	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
6730 #ifdef FMA_SUPPORT
6731 	/* Access handle validation */
6732 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6733 #endif  /* FMA_SUPPORT */
6734 	return (ha_copy);
6735 
6736 } /* emlxs_check_attention() */
6737 
6738 
6739 static void
emlxs_sli3_poll_erratt(emlxs_hba_t * hba)6740 emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
6741 {
6742 	uint32_t ha_copy;
6743 
6744 	ha_copy = emlxs_check_attention(hba);
6745 
6746 	/* Adapter error */
6747 	if (ha_copy & HA_ERATT) {
6748 		HBASTATS.IntrEvent[6]++;
6749 		emlxs_handle_ff_error(hba);
6750 	}
6751 
6752 } /* emlxs_sli3_poll_erratt() */
6753 
6754 
/*
 * Mailbox completion handler for MBX_REG_LOGIN64 requests issued by
 * emlxs_sli3_reg_did().  On success it creates/updates the node for
 * the newly registered RPI; on failure it either retries the mailbox
 * or cleans up.  A value of (uint8_t *)1 in mbq->iocbq is used as a
 * sentinel marking a restricted login request.
 */
static uint32_t
emlxs_sli3_reg_did_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
	MAILBOXQ *mboxq;
	MAILBOX *mb;
	MATCHMAP *mp;
	NODELIST *ndlp;
	emlxs_port_t *vport;
	SERV_PARM *sp;
	int32_t i;
	uint32_t  control;
	uint32_t ldata;
	uint32_t ldid;
	uint16_t lrpi;
	uint16_t lvpi;
	uint32_t rval;

	mb = (MAILBOX *)mbq;

	if (mb->mbxStatus) {
		if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
			/*
			 * NOTE(review): bdeSize appears to double as a
			 * retry indicator here; 0 seems to mean "do not
			 * retry" - confirm against the issuing path.
			 */
			control = mb->un.varRegLogin.un.sp.bdeSize;
			if (control == 0) {
				/* Special handle for vport PLOGI */
				if (mbq->iocbq == (uint8_t *)1) {
					mbq->iocbq = NULL;
				}
				return (0);
			}
			/* Resources may free up; requeue this mailbox */
			emlxs_mb_retry(hba, mbq);
			return (1);
		}
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_node_create_failed_msg,
			    "Limit reached. count=%d", port->node_count);
		}

		/* Special handle for vport PLOGI */
		if (mbq->iocbq == (uint8_t *)1) {
			mbq->iocbq = NULL;
		}

		return (0);
	}

	/* The DMA buffer holding the registered service parameters */
	mp = (MATCHMAP *)mbq->bp;
	if (!mp) {
		return (0);
	}

	/* Switch to the port (vpi) that owns this registration */
	ldata = mb->un.varWords[5];
	lvpi = (ldata & 0xffff);
	port = &VPORT(lvpi);

	/* First copy command data */
	ldata = mb->un.varWords[0];	/* get rpi */
	lrpi = ldata & 0xffff;

	ldata = mb->un.varWords[1];	/* get did */
	ldid = ldata & MASK_DID;

	sp = (SERV_PARM *)mp->virt;

	/* Create or update the node */
	ndlp = emlxs_node_create(port, ldid, lrpi, sp);

	if (ndlp == NULL) {
		emlxs_ub_priv_t *ub_priv;

		/*
		 * Fake a mailbox error, so the mbox_fini
		 * can take appropriate action
		 */
		mb->mbxStatus = MBXERR_RPI_FULL;
		if (mbq->ubp) {
			ub_priv = ((fc_unsol_buf_t *)mbq->ubp)->ub_fca_private;
			ub_priv->flags |= EMLXS_UB_REPLY;
		}

		/* This must be (0xFFFFFE) which was registered by vport */
		if (lrpi == 0) {
			return (0);
		}

		/* No node will track this RPI; unregister it now */
		if (!(mboxq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "reg_did_mbcmpl:failed. Unable to allocate mbox");
			return (0);
		}

		/* Build an UNREG_LOGIN for the orphaned rpi/vpi */
		mb = (MAILBOX *)mboxq->mbox;
		mb->un.varUnregLogin.rpi = lrpi;
		mb->un.varUnregLogin.vpi = lvpi;

		mb->mbxCommand = MBX_UNREG_LOGIN;
		mb->mbxOwner = OWN_HOST;
		mboxq->sbp = NULL;
		mboxq->ubp = NULL;
		mboxq->iocbq = NULL;
		mboxq->mbox_cmpl = NULL;
		mboxq->context = NULL;
		mboxq->port = (void *)port;

		rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mboxq, MBX_NOWAIT, 0);
		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "reg_did_mbcmpl:failed. Unable to send request.");

			emlxs_mem_put(hba, MEM_MBOX, (void *)mboxq);
			return (0);
		}

		return (0);
	}

	if (ndlp->nlp_DID == FABRIC_DID) {
		/* FLOGI/FDISC successfully completed on this port */
		mutex_enter(&EMLXS_PORT_LOCK);
		port->flag |= EMLXS_PORT_FLOGI_CMPL;
		mutex_exit(&EMLXS_PORT_LOCK);

		/* If CLEAR_LA has been sent, then attempt to */
		/* register the vpi now */
		if (hba->state == FC_READY) {
			(void) emlxs_mb_reg_vpi(port, NULL);
		}

		/*
		 * If NPIV Fabric support has just been established on
		 * the physical port, then notify the vports of the
		 * link up
		 */
		if ((lvpi == 0) &&
		    (hba->flag & FC_NPIV_ENABLED) &&
		    (hba->flag & FC_NPIV_SUPPORTED)) {
			/* Skip the physical port */
			for (i = 1; i < MAX_VPORTS; i++) {
				vport = &VPORT(i);

				if (!(vport->flag & EMLXS_PORT_BOUND) ||
				    !(vport->flag &
				    EMLXS_PORT_ENABLED)) {
					continue;
				}

				emlxs_port_online(vport);
			}
		}
	}

	/* Check for special restricted login flag */
	if (mbq->iocbq == (uint8_t *)1) {
		mbq->iocbq = NULL;
		(void) EMLXS_SLI_UNREG_NODE(port, ndlp, NULL, NULL, NULL);
		return (0);
	}

	/* Needed for FCT trigger in emlxs_mb_deferred_cmpl */
	if (mbq->sbp) {
		((emlxs_buf_t *)mbq->sbp)->node = ndlp;
	}

#ifdef DHCHAP_SUPPORT
	if (mbq->sbp || mbq->ubp) {
		if (emlxs_dhc_auth_start(port, ndlp, mbq->sbp,
		    mbq->ubp) == 0) {
			/* Auth started - auth completion will */
			/* handle sbp and ubp now */
			mbq->sbp = NULL;
			mbq->ubp = NULL;
		}
	}
#endif	/* DHCHAP_SUPPORT */

	return (0);

} /* emlxs_sli3_reg_did_mbcmpl() */
6934 
6935 
/*
 * Issue a MBX_REG_LOGIN64 mailbox command to register a remote port's
 * service parameters (param) for the given D_ID with the adapter.
 * sbp/ubp/iocbq are carried on the mailbox for the completion handler
 * emlxs_sli3_reg_did_mbcmpl().  Returns 0 if the request was issued,
 * 1 on any validation or resource failure.
 */
static uint32_t
emlxs_sli3_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
    emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	emlxs_hba_t	*hba = HBA;
	MATCHMAP	*mp;
	MAILBOXQ	*mbq;
	MAILBOX		*mb;
	uint32_t	rval;

	/* Check for invalid node ids to register */
	/* D_ID 0 is only acceptable in loopback mode */
	if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
		return (1);
	}

	/* FC D_IDs are 24 bits; reject anything wider */
	if (did & 0xff000000) {
		return (1);
	}

	if ((rval = emlxs_mb_check_sparm(hba, param))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Invalid service parameters. did=%06x rval=%d", did,
		    rval);

		return (1);
	}

	/* Check if the node limit has been reached */
	if (port->node_count >= hba->max_nodes) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Limit reached. did=%06x count=%d", did,
		    port->node_count);

		return (1);
	}

	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Unable to allocate mailbox. did=%x", did);

		return (1);
	}
	mb = (MAILBOX *)mbq->mbox;
	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	/* Build login request */
	/* Copy the service parameters into a DMA buffer the */
	/* adapter can read via the 64-bit BDE below */
	if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Unable to allocate buffer. did=%x", did);
		return (1);
	}
	bcopy((void *)param, (void *)mp->virt, sizeof (SERV_PARM));

	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM);
	mb->un.varRegLogin.un.sp64.addrHigh = PADDR_HI(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = PADDR_LO(mp->phys);
	mb->un.varRegLogin.did = did;
	mb->un.varWords[30] = 0;	/* flags */
	mb->mbxCommand = MBX_REG_LOGIN64;
	mb->mbxOwner = OWN_HOST;
	mb->un.varRegLogin.vpi = port->vpi;
	mb->un.varRegLogin.rpi = 0;	/* adapter assigns the rpi */

	/* Stash completion context; mp is freed by mailbox teardown */
	mbq->sbp = (void *)sbp;
	mbq->ubp = (void *)ubp;
	mbq->iocbq = (void *)iocbq;
	mbq->bp = (void *)mp;
	mbq->mbox_cmpl = emlxs_sli3_reg_did_mbcmpl;
	mbq->context = NULL;
	mbq->port = (void *)port;

	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Unable to send mbox. did=%x", did);
		return (1);
	}

	return (0);

} /* emlxs_sli3_reg_did() */
7022 
7023 
7024 /*ARGSUSED*/
7025 static uint32_t
emlxs_sli3_unreg_node_mbcmpl(emlxs_hba_t * hba,MAILBOXQ * mbq)7026 emlxs_sli3_unreg_node_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
7027 {
7028 	emlxs_port_t	*port = (emlxs_port_t *)mbq->port;
7029 	MAILBOX		*mb;
7030 	NODELIST	*node;
7031 	uint16_t	rpi;
7032 
7033 	node = (NODELIST *)mbq->context;
7034 	mb = (MAILBOX *)mbq;
7035 	rpi = (node)? node->nlp_Rpi:0xffff;
7036 
7037 	if (mb->mbxStatus) {
7038 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7039 		    "unreg_node_mbcmpl:failed. node=%p rpi=%d status=%x",
7040 		    node, rpi, mb->mbxStatus);
7041 
7042 		return (0);
7043 	}
7044 
7045 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7046 	    "unreg_node_mbcmpl: node=%p rpi=%d",
7047 	    node, rpi);
7048 
7049 	if (node) {
7050 		emlxs_node_rm(port, node);
7051 
7052 	} else {  /* All nodes */
7053 		emlxs_node_destroy_all(port);
7054 	}
7055 
7056 	return (0);
7057 
7058 } /* emlxs_sli3_unreg_node_mbcmpl */
7059 
7060 
7061 static uint32_t
emlxs_sli3_unreg_node(emlxs_port_t * port,NODELIST * node,emlxs_buf_t * sbp,fc_unsol_buf_t * ubp,IOCBQ * iocbq)7062 emlxs_sli3_unreg_node(emlxs_port_t *port, NODELIST *node, emlxs_buf_t *sbp,
7063     fc_unsol_buf_t *ubp, IOCBQ *iocbq)
7064 {
7065 	emlxs_hba_t	*hba = HBA;
7066 	MAILBOXQ	*mbq;
7067 	MAILBOX		*mb;
7068 	uint16_t	rpi;
7069 	uint32_t	rval;
7070 
7071 	if (node) {
7072 		/* Check for base node */
7073 		if (node == &port->node_base) {
7074 			/* just flush base node */
7075 			(void) emlxs_tx_node_flush(port, &port->node_base,
7076 			    0, 0, 0);
7077 			(void) emlxs_chipq_node_flush(port, 0,
7078 			    &port->node_base, 0);
7079 
7080 			port->did = 0;
7081 
7082 			/* Return now */
7083 			return (1);
7084 		}
7085 
7086 		rpi = (uint16_t)node->nlp_Rpi;
7087 
7088 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7089 		    "unreg_node:%p  rpi=%d", node, rpi);
7090 
7091 		/* This node must be (0xFFFFFE) which registered by vport */
7092 		if (rpi == 0) {
7093 			emlxs_node_rm(port, node);
7094 			return (0);
7095 		}
7096 
7097 	} else {	/* Unreg all nodes */
7098 		rpi = 0xffff;
7099 
7100 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7101 		    "unreg_node: All");
7102 	}
7103 
7104 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
7105 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7106 		    "unreg_node:failed. Unable to allocate mbox");
7107 		return (1);
7108 	}
7109 
7110 	mb = (MAILBOX *)mbq->mbox;
7111 	mb->un.varUnregLogin.rpi = rpi;
7112 	mb->un.varUnregLogin.vpi = port->vpip->VPI;
7113 
7114 	mb->mbxCommand = MBX_UNREG_LOGIN;
7115 	mb->mbxOwner = OWN_HOST;
7116 	mbq->sbp = (void *)sbp;
7117 	mbq->ubp = (void *)ubp;
7118 	mbq->iocbq = (void *)iocbq;
7119 	mbq->mbox_cmpl = emlxs_sli3_unreg_node_mbcmpl;
7120 	mbq->context = (void *)node;
7121 	mbq->port = (void *)port;
7122 
7123 	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
7124 	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
7125 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7126 		    "unreg_node:failed. Unable to send request.");
7127 
7128 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
7129 		return (1);
7130 	}
7131 
7132 	return (0);
7133 
7134 } /* emlxs_sli3_unreg_node() */
7135