1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_MBOX_C);
32 
33 
34 
35 emlxs_table_t emlxs_mb_cmd_table[] = {
36 	{MBX_SHUTDOWN, "SHUTDOWN"},
37 	{MBX_LOAD_SM, "LOAD_SM"},
38 	{MBX_READ_NV, "READ_NV"},
39 	{MBX_WRITE_NV, "WRITE_NV"},
40 	{MBX_RUN_BIU_DIAG, "RUN_BIU_DIAG"},
41 	{MBX_INIT_LINK, "INIT_LINK"},
42 	{MBX_DOWN_LINK, "DOWN_LINK"},
43 	{MBX_CONFIG_LINK, "CONFIG_LINK"},
44 	{MBX_PART_SLIM, "PART_SLIM"},
45 	{MBX_CONFIG_RING, "CONFIG_RING"},
46 	{MBX_RESET_RING, "RESET_RING"},
47 	{MBX_READ_CONFIG, "READ_CONFIG"},
48 	{MBX_READ_RCONFIG, "READ_RCONFIG"},
49 	{MBX_READ_SPARM, "READ_SPARM"},
50 	{MBX_READ_STATUS, "READ_STATUS"},
51 	{MBX_READ_RPI, "READ_RPI"},
52 	{MBX_READ_XRI, "READ_XRI"},
53 	{MBX_READ_REV, "READ_REV"},
54 	{MBX_READ_LNK_STAT, "READ_LNK_STAT"},
55 	{MBX_REG_LOGIN, "REG_LOGIN"},
56 	{MBX_UNREG_LOGIN, "UNREG_RPI"},
57 	{MBX_READ_LA, "READ_LA"},
58 	{MBX_CLEAR_LA, "CLEAR_LA"},
59 	{MBX_DUMP_MEMORY, "DUMP_MEMORY"},
60 	{MBX_DUMP_CONTEXT, "DUMP_CONTEXT"},
61 	{MBX_RUN_DIAGS, "RUN_DIAGS"},
62 	{MBX_RESTART, "RESTART"},
63 	{MBX_UPDATE_CFG, "UPDATE_CFG"},
64 	{MBX_DOWN_LOAD, "DOWN_LOAD"},
65 	{MBX_DEL_LD_ENTRY, "DEL_LD_ENTRY"},
66 	{MBX_RUN_PROGRAM, "RUN_PROGRAM"},
67 	{MBX_SET_MASK, "SET_MASK"},
68 	{MBX_SET_VARIABLE, "SET_VARIABLE"},
69 	{MBX_UNREG_D_ID, "UNREG_D_ID"},
70 	{MBX_KILL_BOARD, "KILL_BOARD"},
71 	{MBX_CONFIG_FARP, "CONFIG_FARP"},
72 	{MBX_LOAD_AREA, "LOAD_AREA"},
73 	{MBX_RUN_BIU_DIAG64, "RUN_BIU_DIAG64"},
74 	{MBX_CONFIG_PORT, "CONFIG_PORT"},
75 	{MBX_READ_SPARM64, "READ_SPARM64"},
76 	{MBX_READ_RPI64, "READ_RPI64"},
77 	{MBX_CONFIG_MSI, "CONFIG_MSI"},
78 	{MBX_CONFIG_MSIX, "CONFIG_MSIX"},
79 	{MBX_REG_LOGIN64, "REG_RPI"},
80 	{MBX_READ_LA64, "READ_LA64"},
81 	{MBX_FLASH_WR_ULA, "FLASH_WR_ULA"},
82 	{MBX_SET_DEBUG, "SET_DEBUG"},
83 	{MBX_GET_DEBUG, "GET_DEBUG"},
84 	{MBX_LOAD_EXP_ROM, "LOAD_EXP_ROM"},
85 	{MBX_BEACON, "BEACON"},
86 	{MBX_CONFIG_HBQ, "CONFIG_HBQ"},	/* SLI3 */
87 	{MBX_REG_VPI, "REG_VPI"},	/* NPIV */
88 	{MBX_UNREG_VPI, "UNREG_VPI"},	/* NPIV */
89 	{MBX_ASYNC_EVENT, "ASYNC_EVENT"},
90 	{MBX_HEARTBEAT, "HEARTBEAT"},
91 	{MBX_READ_EVENT_LOG_STATUS, "READ_EVENT_LOG_STATUS"},
92 	{MBX_READ_EVENT_LOG, "READ_EVENT_LOG"},
93 	{MBX_WRITE_EVENT_LOG, "WRITE_EVENT_LOG"},
94 	{MBX_NV_LOG, "NV_LOG"},
95 	{MBX_PORT_CAPABILITIES, "PORT_CAPABILITIES"},
96 	{MBX_IOV_CONTROL, "IOV_CONTROL"},
97 	{MBX_IOV_MBX, "IOV_MBX"},
98 	{MBX_SLI_CONFIG, "SLI_CONFIG"},
99 	{MBX_REQUEST_FEATURES, "REQUEST_FEATURES"},
100 	{MBX_RESUME_RPI, "RESUME_RPI"},
101 	{MBX_REG_VFI, "REG_VFI"},
102 	{MBX_REG_FCFI, "REG_FCFI"},
103 	{MBX_UNREG_VFI, "UNREG_VFI"},
104 	{MBX_UNREG_FCFI, "UNREG_FCFI"},
105 	{MBX_INIT_VFI, "INIT_VFI"},
106 	{MBX_INIT_VPI, "INIT_VPI"}
107 };	/* emlxs_mb_cmd_table */
108 
109 
110 /* SLI4 */
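/*
 * emlxs_mb_resetport  Build an embedded SLI_CONFIG (COMMON_OPCODE_RESET)
 *                     mailbox command to reset the SLI4 port
 */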
111 /*ARGSUSED*/
112 extern void
113 emlxs_mb_resetport(emlxs_hba_t *hba, MAILBOXQ *mbq)
114 {
115 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
116 
117 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
118 	mbq->nonembed = NULL;
119 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
120 	mbq->port = (void *)&PPORT;
121 
122 	/*
123 	 * Signifies an embedded command
124 	 */
125 	mb4->un.varSLIConfig.be.embedded = 1;
126 
127 	mb4->mbxCommand = MBX_SLI_CONFIG;
128 	mb4->mbxOwner = OWN_HOST;
129 	mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
130 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
131 	    IOCTL_SUBSYSTEM_COMMON;
132 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_RESET;
133 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
134 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0;
135 
136 	return;
137 
138 } /* emlxs_mb_resetport() */
139 
140 
141 /* SLI4 */
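/*
 * emlxs_mb_request_features  Build a REQUEST_FEATURES mailbox command
 *                            (FCP initiator, plus NPIV when configured)
 */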
142 /*ARGSUSED*/
143 extern void
144 emlxs_mb_request_features(emlxs_hba_t *hba, MAILBOXQ *mbq)
145 {
146 	emlxs_config_t	*cfg = &CFG;
147 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
148 
149 	hba->flag &= ~FC_NPIV_ENABLED;
150 
151 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
152 	mbq->nonembed = NULL;
153 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
154 	mbq->port = (void *)&PPORT;
155 
156 	mb4->mbxCommand = MBX_REQUEST_FEATURES;
157 	mb4->mbxOwner = OWN_HOST;
158 	mb4->un.varReqFeatures.featuresRequested |=
159 	    SLI4_FEATURE_FCP_INITIATOR;
160 
161 	if (cfg[CFG_NPIV_ENABLE].current) {
162 		mb4->un.varReqFeatures.featuresRequested |=
163 		    SLI4_FEATURE_NPIV;
164 	}
165 
166 } /* emlxs_mb_request_features() */
167 
168 
169 /* SLI4 */
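/*
 * emlxs_mb_noop  Build an embedded SLI_CONFIG (COMMON_OPCODE_NOP) command
 */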
170 /*ARGSUSED*/
171 extern void
172 emlxs_mb_noop(emlxs_hba_t *hba, MAILBOXQ *mbq)
173 {
174 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
175 	IOCTL_COMMON_NOP *nop;
176 
177 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
178 	mbq->nonembed = NULL;
179 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
180 	mbq->port = (void *)&PPORT;
181 
182 	/*
183 	 * Signifies an embedded command
184 	 */
185 	mb4->un.varSLIConfig.be.embedded = 1;
186 
187 	mb4->mbxCommand = MBX_SLI_CONFIG;
188 	mb4->mbxOwner = OWN_HOST;
189 	mb4->un.varSLIConfig.be.payload_length = sizeof (IOCTL_COMMON_NOP) +
190 	    IOCTL_HEADER_SZ;
191 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
192 	    IOCTL_SUBSYSTEM_COMMON;
193 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_NOP;
194 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
195 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
196 	    sizeof (IOCTL_COMMON_NOP);
197 	nop = (IOCTL_COMMON_NOP *)&mb4->un.varSLIConfig.payload;
198 	nop->params.request.context = -1;
199 
200 	return;
201 
202 } /* emlxs_mb_noop() */
203 
204 
205 /* SLI4 */
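/*
 * emlxs_mbext_noop  Build a non-embedded SLI_CONFIG (COMMON_OPCODE_NOP)
 *                   command; returns 1 if no buffer is available
 */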
206 /*ARGSUSED*/
207 extern int
208 emlxs_mbext_noop(emlxs_hba_t *hba, MAILBOXQ *mbq)
209 {
210 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
211 	IOCTL_COMMON_NOP *nop;
212 	MATCHMAP *mp;
213 	mbox_req_hdr_t	*hdr_req;
214 
215 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
216 
217 	if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) {
218 		return (1);
219 	}
220 	/*
221 	 * Save address for completion
222 	 * Signifies a non-embedded command
223 	 */
224 	mb4->un.varSLIConfig.be.embedded = 0;
225 	mbq->nonembed = (void *)mp;
226 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
227 	mbq->port = (void *)&PPORT;
228 
229 	mb4->mbxCommand = MBX_SLI_CONFIG;
230 	mb4->mbxOwner = OWN_HOST;
231 
232 	hdr_req = (mbox_req_hdr_t *)mp->virt;
233 	hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON;
234 	hdr_req->opcode = COMMON_OPCODE_NOP;
235 	hdr_req->timeout = 0;
236 	hdr_req->req_length = sizeof (IOCTL_COMMON_NOP);
237 	nop = (IOCTL_COMMON_NOP *)(hdr_req + 1);
238 	nop->params.request.context = -1;
239 
240 	return (0);
241 
242 } /* emlxs_mbext_noop() */
243 
244 
245 /* SLI4 */
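/*
 * emlxs_mb_eq_create  Build a COMMON_OPCODE_EQ_CREATE command for EQ 'num'
 */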
246 /*ARGSUSED*/
247 extern void
248 emlxs_mb_eq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
249 {
250 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
251 	IOCTL_COMMON_EQ_CREATE *qp;
252 	uint64_t	addr;
253 
254 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
255 	mbq->nonembed = NULL;
256 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
257 	mbq->port = (void *)&PPORT;
258 
259 	/*
260 	 * Signifies an embedded command
261 	 */
262 	mb4->un.varSLIConfig.be.embedded = 1;
263 
264 	mb4->mbxCommand = MBX_SLI_CONFIG;
265 	mb4->mbxOwner = OWN_HOST;
266 	mb4->un.varSLIConfig.be.payload_length =
267 	    sizeof (IOCTL_COMMON_EQ_CREATE) + IOCTL_HEADER_SZ;
268 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
269 	    IOCTL_SUBSYSTEM_COMMON;
270 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_EQ_CREATE;
271 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
272 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
273 	    sizeof (IOCTL_COMMON_EQ_CREATE);
274 	qp = (IOCTL_COMMON_EQ_CREATE *)&mb4->un.varSLIConfig.payload;
275 
276 	/* 1024 * 4 bytes = 4K */
277 	qp->params.request.EQContext.Count = EQ_ELEMENT_COUNT_1024;
278 	qp->params.request.EQContext.Valid = 1;
279 	qp->params.request.EQContext.NoDelay = 0;
280 	qp->params.request.EQContext.DelayMult = EQ_DELAY_MULT;
281 
282 	addr = hba->sli.sli4.eq[num].addr.phys;
283 	qp->params.request.NumPages = 1;
284 	qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
285 	qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
286 
287 	return;
288 
289 } /* emlxs_mb_eq_create() */
290 
291 
292 /* SLI4 */
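/*
 * emlxs_mb_cq_create  Build a COMMON_OPCODE_CQ_CREATE command for CQ 'num'
 */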
293 /*ARGSUSED*/
294 extern void
295 emlxs_mb_cq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
296 {
297 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
298 	IOCTL_COMMON_CQ_CREATE *qp;
299 	uint64_t	addr;
300 
301 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
302 	mbq->nonembed = NULL;
303 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
304 	mbq->port = (void *)&PPORT;
305 
306 	/*
307 	 * Signifies an embedded command
308 	 */
309 	mb4->un.varSLIConfig.be.embedded = 1;
310 
311 	mb4->mbxCommand = MBX_SLI_CONFIG;
312 	mb4->mbxOwner = OWN_HOST;
313 	mb4->un.varSLIConfig.be.payload_length =
314 	    sizeof (IOCTL_COMMON_CQ_CREATE) + IOCTL_HEADER_SZ;
315 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
316 	    IOCTL_SUBSYSTEM_COMMON;
317 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_CQ_CREATE;
318 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
319 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
320 	    sizeof (IOCTL_COMMON_CQ_CREATE);
321 	qp = (IOCTL_COMMON_CQ_CREATE *)&mb4->un.varSLIConfig.payload;
322 
323 	/* 256 * 16 bytes = 4K */
324 	qp->params.request.CQContext.Count = CQ_ELEMENT_COUNT_256;
325 	qp->params.request.CQContext.EQId = hba->sli.sli4.cq[num].eqid;
326 	qp->params.request.CQContext.Valid = 1;
327 	qp->params.request.CQContext.Eventable = 1;
328 	qp->params.request.CQContext.NoDelay = 0;
329 
330 	addr = hba->sli.sli4.cq[num].addr.phys;
331 	qp->params.request.NumPages = 1;
332 	qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
333 	qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
334 
335 	return;
336 
337 } /* emlxs_mb_cq_create() */
338 
339 
340 /* SLI4 */
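/*
 * emlxs_mb_wq_create  Build an FCOE_OPCODE_WQ_CREATE command for WQ 'num'
 */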
341 /*ARGSUSED*/
342 extern void
343 emlxs_mb_wq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
344 {
345 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
346 	IOCTL_FCOE_WQ_CREATE *qp;
347 	uint64_t addr;
348 	int i;
349 
350 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
351 	mbq->nonembed = NULL;
352 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
353 	mbq->port = (void *)&PPORT;
354 
355 	/*
356 	 * Signifies an embedded command
357 	 */
358 	mb4->un.varSLIConfig.be.embedded = 1;
359 
360 	mb4->mbxCommand = MBX_SLI_CONFIG;
361 	mb4->mbxOwner = OWN_HOST;
362 	mb4->un.varSLIConfig.be.payload_length =
363 	    sizeof (IOCTL_FCOE_WQ_CREATE) + IOCTL_HEADER_SZ;
364 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
365 	    IOCTL_SUBSYSTEM_FCOE;
366 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = FCOE_OPCODE_WQ_CREATE;
367 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
368 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
369 	    sizeof (IOCTL_FCOE_WQ_CREATE);
370 
371 	addr = hba->sli.sli4.wq[num].addr.phys;
372 	qp = (IOCTL_FCOE_WQ_CREATE *)&mb4->un.varSLIConfig.payload;
373 
374 	qp->params.request.CQId = hba->sli.sli4.wq[num].cqid;
375 
376 	qp->params.request.NumPages = EMLXS_NUM_WQ_PAGES;
377 	for (i = 0; i < EMLXS_NUM_WQ_PAGES; i++) {
378 		qp->params.request.Pages[i].addrLow = PADDR_LO(addr);
379 		qp->params.request.Pages[i].addrHigh = PADDR_HI(addr);
380 		addr += 4096;
381 	}
382 
383 	return;
384 
385 } /* emlxs_mb_wq_create() */
386 
387 
388 /* SLI4 */
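/*
 * emlxs_mb_rq_create  Build an FCOE_OPCODE_RQ_CREATE command for RQ 'num'
 */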
389 /*ARGSUSED*/
390 extern void
391 emlxs_mb_rq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
392 {
393 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
394 	IOCTL_FCOE_RQ_CREATE *qp;
395 	uint64_t	addr;
396 
397 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
398 	mbq->nonembed = NULL;
399 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
400 	mbq->port = (void *)&PPORT;
401 
402 	/*
403 	 * Signifies an embedded command
404 	 */
405 	mb4->un.varSLIConfig.be.embedded = 1;
406 
407 	mb4->mbxCommand = MBX_SLI_CONFIG;
408 	mb4->mbxOwner = OWN_HOST;
409 	mb4->un.varSLIConfig.be.payload_length =
410 	    sizeof (IOCTL_FCOE_RQ_CREATE) + IOCTL_HEADER_SZ;
411 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
412 	    IOCTL_SUBSYSTEM_FCOE;
413 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = FCOE_OPCODE_RQ_CREATE;
414 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
415 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
416 	    sizeof (IOCTL_FCOE_RQ_CREATE);
417 	addr = hba->sli.sli4.rq[num].addr.phys;
418 
419 	qp = (IOCTL_FCOE_RQ_CREATE *)&mb4->un.varSLIConfig.payload;
420 
421 	qp->params.request.RQContext.RQSize	= RQ_DEPTH_EXPONENT;
422 	qp->params.request.RQContext.BufferSize	= RQB_DATA_SIZE;
423 	qp->params.request.RQContext.CQIdRecv	= hba->sli.sli4.rq[num].cqid;
424 
425 	qp->params.request.NumPages = 1;
426 	qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
427 	qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
428 
429 	return;
430 
431 } /* emlxs_mb_rq_create() */
432 
433 
434 /* SLI4 */
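/*
 * emlxs_mb_mq_create  Build a COMMON_OPCODE_MQ_CREATE command for the MQ
 */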
435 /*ARGSUSED*/
436 extern void
437 emlxs_mb_mq_create(emlxs_hba_t *hba, MAILBOXQ *mbq)
438 {
439 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
440 	IOCTL_COMMON_MQ_CREATE *qp;
441 	uint64_t	addr;
442 
443 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
444 	mbq->nonembed = NULL;
445 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
446 	mbq->port = (void *)&PPORT;
447 
448 	/*
449 	 * Signifies an embedded command
450 	 */
451 	mb4->un.varSLIConfig.be.embedded = 1;
452 
453 	mb4->mbxCommand = MBX_SLI_CONFIG;
454 	mb4->mbxOwner = OWN_HOST;
455 	mb4->un.varSLIConfig.be.payload_length =
456 	    sizeof (IOCTL_COMMON_MQ_CREATE) + IOCTL_HEADER_SZ;
457 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
458 	    IOCTL_SUBSYSTEM_COMMON;
459 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_MQ_CREATE;
460 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
461 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
462 	    sizeof (IOCTL_COMMON_MQ_CREATE);
463 
464 	addr = hba->sli.sli4.mq.addr.phys;
465 	qp = (IOCTL_COMMON_MQ_CREATE *)&mb4->un.varSLIConfig.payload;
466 
467 	qp->params.request.MQContext.Size = MQ_ELEMENT_COUNT_16;
468 	qp->params.request.MQContext.Valid = 1;
469 	qp->params.request.MQContext.CQId = hba->sli.sli4.mq.cqid;
470 
471 	qp->params.request.NumPages = 1;
472 	qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
473 	qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
474 
475 	return;
476 
477 } /* emlxs_mb_mq_create() */
478 
479 
480 /* SLI4 */
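/*
 * emlxs_mb_mcc_create_ext  Build a COMMON_OPCODE_MCC_CREATE_EXT command
 *                          for the MQ with async event reporting enabled
 */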
481 /*ARGSUSED*/
482 extern void
483 emlxs_mb_mcc_create_ext(emlxs_hba_t *hba, MAILBOXQ *mbq)
484 {
485 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
486 	IOCTL_COMMON_MCC_CREATE_EXT *qp;
487 	uint64_t	addr;
488 
489 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
490 	mbq->nonembed = NULL;
491 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
492 	mbq->port = (void *)&PPORT;
493 
494 	/*
495 	 * Signifies an embedded command
496 	 */
497 	mb4->un.varSLIConfig.be.embedded = 1;
498 
499 	mb4->mbxCommand = MBX_SLI_CONFIG;
500 	mb4->mbxOwner = OWN_HOST;
501 	mb4->un.varSLIConfig.be.payload_length =
502 	    sizeof (IOCTL_COMMON_MCC_CREATE_EXT) + IOCTL_HEADER_SZ;
503 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
504 	    IOCTL_SUBSYSTEM_COMMON;
505 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
506 	    COMMON_OPCODE_MCC_CREATE_EXT;
507 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
508 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
509 	    sizeof (IOCTL_COMMON_MCC_CREATE_EXT);
510 
511 	addr = hba->sli.sli4.mq.addr.phys;
512 	qp = (IOCTL_COMMON_MCC_CREATE_EXT *)&mb4->un.varSLIConfig.payload;
513 
514 	qp->params.request.num_pages = 1;
515 	qp->params.request.async_event_bitmap =
516 	    ASYNC_LINK_EVENT | ASYNC_FCF_EVENT | ASYNC_GROUP5_EVENT;
517 	qp->params.request.context.Size = MQ_ELEMENT_COUNT_16;
518 	qp->params.request.context.Valid = 1;
519 	qp->params.request.context.CQId = hba->sli.sli4.mq.cqid;
520 
521 	qp->params.request.pages[0].addrLow = PADDR_LO(addr);
522 	qp->params.request.pages[0].addrHigh = PADDR_HI(addr);
523 
524 	return;
525 
526 } /* emlxs_mb_mcc_create_ext() */
527 
528 
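/*
 * emlxs_mb_async_event  Issue an ASYNC_EVENT mailbox command on the
 *                       ELS channel
 */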
529 /*ARGSUSED*/
530 extern void
531 emlxs_mb_async_event(emlxs_hba_t *hba, MAILBOXQ *mbq)
532 {
533 	MAILBOX *mb = (MAILBOX *)mbq;
534 
535 	bzero((void *) mb, MAILBOX_CMD_BSIZE);
536 
537 	mb->mbxCommand = MBX_ASYNC_EVENT;
538 	mb->mbxOwner = OWN_HOST;
539 	mb->un.varWords[0] = hba->channel_els;
540 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
541 	mbq->port = (void *)&PPORT;
542 
543 	return;
544 
545 } /* emlxs_mb_async_event() */
546 
547 
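/*
 * emlxs_mb_heartbeat  Issue a HEARTBEAT mailbox command
 */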
548 /*ARGSUSED*/
549 extern void
550 emlxs_mb_heartbeat(emlxs_hba_t *hba, MAILBOXQ *mbq)
551 {
552 	MAILBOX *mb = (MAILBOX *)mbq;
553 
554 	bzero((void *) mb, MAILBOX_CMD_BSIZE);
555 
556 	mb->mbxCommand = MBX_HEARTBEAT;
557 	mb->mbxOwner = OWN_HOST;
558 	mbq->mbox_cmpl = NULL; /* no cmpl needed for hbeat */
559 	mbq->port = (void *)&PPORT;
560 
561 	return;
562 
563 } /* emlxs_mb_heartbeat() */
564 
565 
566 #ifdef MSI_SUPPORT
567 
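/*
 * emlxs_mb_config_msi  Issue a CONFIG_MSI mailbox command mapping host
 *                      attention conditions to MSI message ids
 */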
568 /*ARGSUSED*/
569 extern void
570 emlxs_mb_config_msi(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t *intr_map,
571     uint32_t intr_count)
572 {
573 	MAILBOX *mb = (MAILBOX *)mbq;
574 	uint16_t i;
575 	uint32_t mask;
576 
577 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
578 
579 	mb->mbxCommand = MBX_CONFIG_MSI;
580 
581 	/* Set the default message id to zero */
582 	mb->un.varCfgMSI.defaultPresent = 1;
583 	mb->un.varCfgMSI.defaultMessageNumber = 0;
584 
585 	for (i = 1; i < intr_count; i++) {
586 		mask = intr_map[i];
587 
588 		mb->un.varCfgMSI.attConditions |= mask;
589 
590 #ifdef EMLXS_BIG_ENDIAN
591 		if (mask & HA_R0ATT) {
592 			mb->un.varCfgMSI.messageNumberByHA[3] = i;
593 		}
594 		if (mask & HA_R1ATT) {
595 			mb->un.varCfgMSI.messageNumberByHA[7] = i;
596 		}
597 		if (mask & HA_R2ATT) {
598 			mb->un.varCfgMSI.messageNumberByHA[11] = i;
599 		}
600 		if (mask & HA_R3ATT) {
601 			mb->un.varCfgMSI.messageNumberByHA[15] = i;
602 		}
603 		if (mask & HA_LATT) {
604 			mb->un.varCfgMSI.messageNumberByHA[29] = i;
605 		}
606 		if (mask & HA_MBATT) {
607 			mb->un.varCfgMSI.messageNumberByHA[30] = i;
608 		}
609 		if (mask & HA_ERATT) {
610 			mb->un.varCfgMSI.messageNumberByHA[31] = i;
611 		}
612 #endif	/* EMLXS_BIG_ENDIAN */
613 
614 #ifdef EMLXS_LITTLE_ENDIAN
615 		/* Accounts for half word swap of LE architecture */
616 		if (mask & HA_R0ATT) {
617 			mb->un.varCfgMSI.messageNumberByHA[2] = i;
618 		}
619 		if (mask & HA_R1ATT) {
620 			mb->un.varCfgMSI.messageNumberByHA[6] = i;
621 		}
622 		if (mask & HA_R2ATT) {
623 			mb->un.varCfgMSI.messageNumberByHA[10] = i;
624 		}
625 		if (mask & HA_R3ATT) {
626 			mb->un.varCfgMSI.messageNumberByHA[14] = i;
627 		}
628 		if (mask & HA_LATT) {
629 			mb->un.varCfgMSI.messageNumberByHA[28] = i;
630 		}
631 		if (mask & HA_MBATT) {
632 			mb->un.varCfgMSI.messageNumberByHA[31] = i;
633 		}
634 		if (mask & HA_ERATT) {
635 			mb->un.varCfgMSI.messageNumberByHA[30] = i;
636 		}
637 #endif	/* EMLXS_LITTLE_ENDIAN */
638 	}
639 
640 	mb->mbxOwner = OWN_HOST;
641 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
642 	mbq->port = (void *)&PPORT;
643 
644 	return;
645 
646 } /* emlxs_mb_config_msi() */
647 
648 
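/*
 * emlxs_mb_config_msix  Issue a CONFIG_MSIX mailbox command mapping host
 *                       attention conditions to MSI-X message ids
 */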
649 /*ARGSUSED*/
650 extern void
651 emlxs_mb_config_msix(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t *intr_map,
652     uint32_t intr_count)
653 {
654 	MAILBOX *mb = (MAILBOX *)mbq;
655 	uint8_t i;
656 	uint32_t mask;
657 
658 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
659 
660 	mb->mbxCommand = MBX_CONFIG_MSIX;
661 
662 	/* Set the default message id to zero */
663 	mb->un.varCfgMSIX.defaultPresent = 1;
664 	mb->un.varCfgMSIX.defaultMessageNumber = 0;
665 
666 	for (i = 1; i < intr_count; i++) {
667 		mask = intr_map[i];
668 
669 		mb->un.varCfgMSIX.attConditions1 |= mask;
670 
671 #ifdef EMLXS_BIG_ENDIAN
672 		if (mask & HA_R0ATT) {
673 			mb->un.varCfgMSIX.messageNumberByHA[3] = i;
674 		}
675 		if (mask & HA_R1ATT) {
676 			mb->un.varCfgMSIX.messageNumberByHA[7] = i;
677 		}
678 		if (mask & HA_R2ATT) {
679 			mb->un.varCfgMSIX.messageNumberByHA[11] = i;
680 		}
681 		if (mask & HA_R3ATT) {
682 			mb->un.varCfgMSIX.messageNumberByHA[15] = i;
683 		}
684 		if (mask & HA_LATT) {
685 			mb->un.varCfgMSIX.messageNumberByHA[29] = i;
686 		}
687 		if (mask & HA_MBATT) {
688 			mb->un.varCfgMSIX.messageNumberByHA[30] = i;
689 		}
690 		if (mask & HA_ERATT) {
691 			mb->un.varCfgMSIX.messageNumberByHA[31] = i;
692 		}
693 #endif	/* EMLXS_BIG_ENDIAN */
694 
695 #ifdef EMLXS_LITTLE_ENDIAN
696 		/* Accounts for word swap of LE architecture */
697 		if (mask & HA_R0ATT) {
698 			mb->un.varCfgMSIX.messageNumberByHA[0] = i;
699 		}
700 		if (mask & HA_R1ATT) {
701 			mb->un.varCfgMSIX.messageNumberByHA[4] = i;
702 		}
703 		if (mask & HA_R2ATT) {
704 			mb->un.varCfgMSIX.messageNumberByHA[8] = i;
705 		}
706 		if (mask & HA_R3ATT) {
707 			mb->un.varCfgMSIX.messageNumberByHA[12] = i;
708 		}
709 		if (mask & HA_LATT) {
710 			mb->un.varCfgMSIX.messageNumberByHA[30] = i;
711 		}
712 		if (mask & HA_MBATT) {
713 			mb->un.varCfgMSIX.messageNumberByHA[29] = i;
714 		}
715 		if (mask & HA_ERATT) {
716 			mb->un.varCfgMSIX.messageNumberByHA[28] = i;
717 		}
718 #endif	/* EMLXS_LITTLE_ENDIAN */
719 	}
720 
721 	mb->mbxOwner = OWN_HOST;
722 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
723 	mbq->port = (void *)&PPORT;
724 
725 	return;
726 
727 } /* emlxs_mb_config_msix() */
728 
729 
730 #endif	/* MSI_SUPPORT */
731 
732 
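/*
 * emlxs_mb_reset_ring  Issue a RESET_RING mailbox command
 */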
733 /*ARGSUSED*/
734 extern void
735 emlxs_mb_reset_ring(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t ringno)
736 {
737 	MAILBOX *mb = (MAILBOX *)mbq;
738 
739 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
740 
741 	mb->mbxCommand = MBX_RESET_RING;
742 	mb->un.varRstRing.ring_no = ringno;
743 	mb->mbxOwner = OWN_HOST;
744 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
745 	mbq->port = (void *)&PPORT;
746 
747 	return;
748 
749 } /* emlxs_mb_reset_ring() */
750 
751 
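/*
 * emlxs_mb_dump_vpd  Issue a DUMP_MEMORY command for the VPD region
 */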
752 /*ARGSUSED*/
753 extern void
754 emlxs_mb_dump_vpd(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset)
755 {
756 
757 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
758 		MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
759 
760 		/* Clear the local dump_region */
761 		bzero(hba->sli.sli4.dump_region.virt,
762 		    hba->sli.sli4.dump_region.size);
763 
764 		bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
765 
766 		mb4->mbxCommand = MBX_DUMP_MEMORY;
767 		mb4->un.varDmp4.type = DMP_NV_PARAMS;
768 		mb4->un.varDmp4.entry_index = offset;
769 		mb4->un.varDmp4.region_id = DMP_VPD_REGION;
770 
771 		mb4->un.varDmp4.available_cnt = hba->sli.sli4.dump_region.size;
772 		mb4->un.varDmp4.addrHigh =
773 		    PADDR_HI(hba->sli.sli4.dump_region.phys);
774 		mb4->un.varDmp4.addrLow =
775 		    PADDR_LO(hba->sli.sli4.dump_region.phys);
776 		mb4->un.varDmp4.rsp_cnt = 0;
777 
778 		mb4->mbxOwner = OWN_HOST;
779 
780 	} else {
781 		MAILBOX *mb = (MAILBOX *)mbq;
782 
783 		bzero((void *)mb, MAILBOX_CMD_BSIZE);
784 
785 		mb->mbxCommand = MBX_DUMP_MEMORY;
786 		mb->un.varDmp.cv = 1;
787 		mb->un.varDmp.type = DMP_NV_PARAMS;
788 		mb->un.varDmp.entry_index = offset;
789 		mb->un.varDmp.region_id = DMP_VPD_REGION;
790 
791 		/* limited by mailbox size */
792 		mb->un.varDmp.word_cnt = DMP_VPD_DUMP_WCOUNT;
793 
794 		mb->un.varDmp.co = 0;
795 		mb->un.varDmp.resp_offset = 0;
796 		mb->mbxOwner = OWN_HOST;
797 	}
798 
799 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
800 	mbq->port = (void *)&PPORT;
801 
802 } /* emlxs_mb_dump_vpd() */
803 
804 
805 /* SLI4 */
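/*
 * emlxs_mb_dump_fcoe  Issue a DUMP_MEMORY command for the FCoE region
 */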
806 /*ARGSUSED*/
807 extern void
808 emlxs_mb_dump_fcoe(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset)
809 {
810 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
811 
812 	if (hba->sli_mode < EMLXS_HBA_SLI4_MODE) {
813 		return;
814 	}
815 
816 	/* Clear the local dump_region */
817 	bzero(hba->sli.sli4.dump_region.virt,
818 	    hba->sli.sli4.dump_region.size);
819 
820 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
821 
822 	mb4->mbxCommand = MBX_DUMP_MEMORY;
823 	mb4->un.varDmp4.type = DMP_NV_PARAMS;
824 	mb4->un.varDmp4.entry_index = offset;
825 	mb4->un.varDmp4.region_id = DMP_FCOE_REGION;
826 
827 	mb4->un.varDmp4.available_cnt = hba->sli.sli4.dump_region.size;
828 	mb4->un.varDmp4.addrHigh =
829 	    PADDR_HI(hba->sli.sli4.dump_region.phys);
830 	mb4->un.varDmp4.addrLow =
831 	    PADDR_LO(hba->sli.sli4.dump_region.phys);
832 	mb4->un.varDmp4.rsp_cnt = 0;
833 
834 	mb4->mbxOwner = OWN_HOST;
835 
836 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
837 	mbq->port = (void *)&PPORT;
838 
839 } /* emlxs_mb_dump_fcoe() */
840 
841 
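/*
 * emlxs_mb_dump  Issue a DUMP_MEMORY command for 'words' words at 'offset'
 */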
842 /*ARGSUSED*/
843 extern void
844 emlxs_mb_dump(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset, uint32_t words)
845 {
846 
847 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
848 		MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
849 
850 		/* Clear the local dump_region */
851 		bzero(hba->sli.sli4.dump_region.virt,
852 		    hba->sli.sli4.dump_region.size);
853 
854 		bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
855 
856 		mb4->mbxCommand = MBX_DUMP_MEMORY;
857 		mb4->un.varDmp4.type = DMP_MEM_REG;
858 		mb4->un.varDmp4.entry_index = offset;
859 		mb4->un.varDmp4.region_id = 0;
860 
861 		mb4->un.varDmp4.available_cnt = min((words*4),
862 		    hba->sli.sli4.dump_region.size);
863 		mb4->un.varDmp4.addrHigh =
864 		    PADDR_HI(hba->sli.sli4.dump_region.phys);
865 		mb4->un.varDmp4.addrLow =
866 		    PADDR_LO(hba->sli.sli4.dump_region.phys);
867 		mb4->un.varDmp4.rsp_cnt = 0;
868 
869 		mb4->mbxOwner = OWN_HOST;
870 
871 	} else {
872 
873 		MAILBOX *mb = (MAILBOX *)mbq;
874 
875 		bzero((void *)mb, MAILBOX_CMD_BSIZE);
876 
877 		mb->mbxCommand = MBX_DUMP_MEMORY;
878 		mb->un.varDmp.type = DMP_MEM_REG;
879 		mb->un.varDmp.word_cnt = words;
880 		mb->un.varDmp.base_adr = offset;
881 
882 		mb->un.varDmp.co = 0;
883 		mb->un.varDmp.resp_offset = 0;
884 		mb->mbxOwner = OWN_HOST;
885 	}
886 
887 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
888 	mbq->port = (void *)&PPORT;
889 
890 	return;
891 
892 } /* emlxs_mb_dump() */
893 
894 
895 /*
896  *  emlxs_mb_read_nv  Issue a READ NVPARAM mailbox command
897  */
898 /*ARGSUSED*/
899 extern void
900 emlxs_mb_read_nv(emlxs_hba_t *hba, MAILBOXQ *mbq)
901 {
902 	MAILBOX *mb = (MAILBOX *)mbq;
903 
904 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
905 
906 	mb->mbxCommand = MBX_READ_NV;
907 	mb->mbxOwner = OWN_HOST;
908 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
909 	mbq->port = (void *)&PPORT;
910 
911 } /* emlxs_mb_read_nv() */
912 
913 
914 /*
915  * emlxs_mb_read_rev  Issue a READ REV mailbox command
916  */
917 /*ARGSUSED*/
918 extern void
919 emlxs_mb_read_rev(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t v3)
920 {
921 	MAILBOX *mb = (MAILBOX *)mbq;
922 
923 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
924 		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
925 		mbq->nonembed = NULL;
926 	} else {
927 		bzero((void *)mb, MAILBOX_CMD_BSIZE);
928 
929 		mb->un.varRdRev.cv = 1;
930 
931 		if (v3) {
932 			mb->un.varRdRev.cv3 = 1;
933 		}
934 	}
935 
936 	mb->mbxCommand = MBX_READ_REV;
937 	mb->mbxOwner = OWN_HOST;
938 	mbq->mbox_cmpl = NULL;
939 	mbq->port = (void *)&PPORT;
940 
941 } /* emlxs_mb_read_rev() */
942 
943 
944 /*
945  * emlxs_mb_run_biu_diag  Issue a RUN_BIU_DIAG mailbox command
946  */
947 /*ARGSUSED*/
948 extern uint32_t
949 emlxs_mb_run_biu_diag(emlxs_hba_t *hba, MAILBOXQ *mbq, uint64_t out,
950     uint64_t in)
951 {
952 	MAILBOX *mb = (MAILBOX *)mbq;
953 
954 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
955 
956 	mb->mbxCommand = MBX_RUN_BIU_DIAG64;
957 	mb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
958 	mb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = PADDR_HI(out);
959 	mb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = PADDR_LO(out);
960 	mb->un.varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
961 	mb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = PADDR_HI(in);
962 	mb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = PADDR_LO(in);
963 	mb->mbxOwner = OWN_HOST;
964 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
965 	mbq->port = (void *)&PPORT;
966 
967 	return (0);
968 } /* emlxs_mb_run_biu_diag() */
969 
970 
971 /* This should only be called with active MBX_NOWAIT mailboxes */
972 void
973 emlxs_mb_retry(emlxs_hba_t *hba, MAILBOXQ *mbq)
974 {
975 	MAILBOX	*mb;
976 	MAILBOX	*mbox;
977 	int rc;
978 
979 	mbox = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX, 1);
980 	if (!mbox) {
981 		return;
982 	}
983 	mb = (MAILBOX *)mbq;
984 	bcopy((uint8_t *)mb, (uint8_t *)mbox, MAILBOX_CMD_BSIZE);
985 	mbox->mbxOwner = OWN_HOST;
986 	mbox->mbxStatus = 0;
987 
988 	mutex_enter(&EMLXS_PORT_LOCK);
989 
990 	HBASTATS.MboxCompleted++;
991 
992 	if (mb->mbxStatus != 0) {
993 		HBASTATS.MboxError++;
994 	} else {
995 		HBASTATS.MboxGood++;
996 	}
997 
998 	hba->mbox_mbq = NULL;
999 	hba->mbox_queue_flag = 0;
1000 
1001 	mutex_exit(&EMLXS_PORT_LOCK);
1002 
1003 	rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
1004 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1005 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
1006 	}
1007 	return;
1008 
1009 } /* emlxs_mb_retry() */
1010 
1011 
1012 /* SLI3 */
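/*
 * emlxs_read_la_mbcmpl  READ_LA64 completion handler; processes the link
 *                       attention and declares the link up or down
 */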
1013 static uint32_t
1014 emlxs_read_la_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1015 {
1016 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1017 	MAILBOX *mb;
1018 	MAILBOXQ *mbox;
1019 	MATCHMAP *mp;
1020 	READ_LA_VAR la;
1021 	int i;
1022 	uint32_t  control;
1023 
1024 	mb = (MAILBOX *)mbq;
1025 	if (mb->mbxStatus) {
1026 		if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
1027 			control = mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize;
1028 			if (control == 0) {
1029 				(void) emlxs_mb_read_la(hba, mbq);
1030 			}
1031 			emlxs_mb_retry(hba, mbq);
1032 			return (1);
1033 		}
1034 		/* Enable Link Attention interrupts */
1035 		mutex_enter(&EMLXS_PORT_LOCK);
1036 
1037 		if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1038 			hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1039 			WRITE_CSR_REG(hba, FC_HC_REG(hba),
1040 			    hba->sli.sli3.hc_copy);
1041 #ifdef FMA_SUPPORT
1042 			/* Access handle validation */
1043 			EMLXS_CHK_ACC_HANDLE(hba,
1044 			    hba->sli.sli3.csr_acc_handle);
1045 #endif  /* FMA_SUPPORT */
1046 		}
1047 
1048 		mutex_exit(&EMLXS_PORT_LOCK);
1049 		return (0);
1050 	}
1051 	bcopy((void *)&mb->un.varReadLA, (void *)&la, sizeof (READ_LA_VAR));
1052 
1053 	mp = (MATCHMAP *)mbq->bp;
1054 	if (mp) {
1055 		bcopy((caddr_t)mp->virt, (caddr_t)port->alpa_map, 128);
1056 	} else {
1057 		bzero((caddr_t)port->alpa_map, 128);
1058 	}
1059 
1060 	if (la.attType == AT_LINK_UP) {
1061 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_linkup_atten_msg,
1062 		    "tag=%d -> %d  ALPA=%x",
1063 		    (uint32_t)hba->link_event_tag,
1064 		    (uint32_t)la.eventTag,
1065 		    (uint32_t)la.granted_AL_PA);
1066 	} else {
1067 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_linkdown_atten_msg,
1068 		    "tag=%d -> %d  ALPA=%x",
1069 		    (uint32_t)hba->link_event_tag,
1070 		    (uint32_t)la.eventTag,
1071 		    (uint32_t)la.granted_AL_PA);
1072 	}
1073 
1074 	if (la.pb) {
1075 		hba->flag |= FC_BYPASSED_MODE;
1076 	} else {
1077 		hba->flag &= ~FC_BYPASSED_MODE;
1078 	}
1079 
1080 	if (hba->link_event_tag == la.eventTag) {
1081 		HBASTATS.LinkMultiEvent++;
1082 	} else if (hba->link_event_tag + 1 < la.eventTag) {
1083 		HBASTATS.LinkMultiEvent++;
1084 
1085 		/* Make sure link is declared down */
1086 		emlxs_linkdown(hba);
1087 	}
1088 
1089 	hba->link_event_tag = la.eventTag;
1090 	port->lip_type = 0;
1091 
1092 	/* If link not already up then declare it up now */
1093 	if ((la.attType == AT_LINK_UP) && (hba->state < FC_LINK_UP)) {
1094 
1095 #ifdef MENLO_SUPPORT
1096 		if ((hba->model_info.device_id == PCI_DEVICE_ID_LP21000_M) &&
1097 		    (hba->flag & (FC_ILB_MODE | FC_ELB_MODE))) {
1098 			la.topology = TOPOLOGY_LOOP;
1099 			la.granted_AL_PA = 0;
1100 			port->alpa_map[0] = 1;
1101 			port->alpa_map[1] = 0;
1102 			la.lipType = LT_PORT_INIT;
1103 		}
1104 #endif /* MENLO_SUPPORT */
1105 		/* Save the linkspeed */
1106 		hba->linkspeed = la.UlnkSpeed;
1107 
1108 		/* Check for old model adapters that only */
1109 		/* supported 1Gb */
1110 		if ((hba->linkspeed == 0) &&
1111 		    (hba->model_info.chip & EMLXS_DRAGONFLY_CHIP)) {
1112 			hba->linkspeed = LA_1GHZ_LINK;
1113 		}
1114 
1115 		if ((hba->topology = la.topology) == TOPOLOGY_LOOP) {
1116 			port->did = la.granted_AL_PA;
1117 			port->lip_type = la.lipType;
1118 			if (hba->flag & FC_SLIM2_MODE) {
1119 				i = la.un.lilpBde64.tus.f.bdeSize;
1120 			} else {
1121 				i = la.un.lilpBde.bdeSize;
1122 			}
1123 
1124 			if (i == 0) {
1125 				port->alpa_map[0] = 0;
1126 			} else {
1127 				uint8_t *alpa_map;
1128 				uint32_t j;
1129 
1130 				/* Check number of devices in map */
1131 				if (port->alpa_map[0] > 127) {
1132 					port->alpa_map[0] = 127;
1133 				}
1134 
1135 				alpa_map = (uint8_t *)port->alpa_map;
1136 
1137 				EMLXS_MSGF(EMLXS_CONTEXT,
1138 				    &emlxs_link_atten_msg,
1139 				    "alpa_map: %d device(s):      "
1140 				    "%02x %02x %02x %02x %02x %02x "
1141 				    "%02x", alpa_map[0], alpa_map[1],
1142 				    alpa_map[2], alpa_map[3],
1143 				    alpa_map[4], alpa_map[5],
1144 				    alpa_map[6], alpa_map[7]);
1145 
1146 				for (j = 8; j <= alpa_map[0]; j += 8) {
1147 					EMLXS_MSGF(EMLXS_CONTEXT,
1148 					    &emlxs_link_atten_msg,
1149 					    "alpa_map:             "
1150 					    "%02x %02x %02x %02x %02x "
1151 					    "%02x %02x %02x",
1152 					    alpa_map[j],
1153 					    alpa_map[j + 1],
1154 					    alpa_map[j + 2],
1155 					    alpa_map[j + 3],
1156 					    alpa_map[j + 4],
1157 					    alpa_map[j + 5],
1158 					    alpa_map[j + 6],
1159 					    alpa_map[j + 7]);
1160 				}
1161 			}
1162 		}
1163 #ifdef MENLO_SUPPORT
1164 		/* Check if Menlo maintenance mode is enabled */
1165 		if (hba->model_info.device_id ==
1166 		    PCI_DEVICE_ID_LP21000_M) {
1167 			if (la.mm == 1) {
1168 				EMLXS_MSGF(EMLXS_CONTEXT,
1169 				    &emlxs_link_atten_msg,
1170 				    "Maintenance Mode enabled.");
1171 
1172 				mutex_enter(&EMLXS_PORT_LOCK);
1173 				hba->flag |= FC_MENLO_MODE;
1174 				mutex_exit(&EMLXS_PORT_LOCK);
1175 
1176 				mutex_enter(&EMLXS_LINKUP_LOCK);
1177 				cv_broadcast(&EMLXS_LINKUP_CV);
1178 				mutex_exit(&EMLXS_LINKUP_LOCK);
1179 			} else {
1180 				EMLXS_MSGF(EMLXS_CONTEXT,
1181 				    &emlxs_link_atten_msg,
1182 				    "Maintenance Mode disabled.");
1183 			}
1184 
1185 			/* Check FCoE attention bit */
1186 			if (la.fa == 1) {
1187 				emlxs_thread_spawn(hba,
1188 				    emlxs_fcoe_attention_thread,
1189 				    0, 0);
1190 			}
1191 		}
1192 #endif /* MENLO_SUPPORT */
1193 
1194 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1195 		    MEM_MBOX, 1))) {
1196 			/* This should turn on DELAYED ABTS for */
1197 			/* ELS timeouts */
1198 			emlxs_mb_set_var(hba, mbox, 0x00052198, 0x1);
1199 
1200 			emlxs_mb_put(hba, mbox);
1201 		}
1202 
1203 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1204 		    MEM_MBOX, 1))) {
1205 			/* Read the adapter's service */
1206 			/* parameters */
1207 			if (emlxs_mb_read_sparam(hba, mbox) == 0) {
1208 				emlxs_mb_put(hba, mbox);
1209 			} else {
1210 				emlxs_mem_put(hba, MEM_MBOX,
1211 				    (void *)mbox);
1212 			}
1213 		}
1214 
1215 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1216 		    MEM_MBOX, 1))) {
1217 			emlxs_mb_config_link(hba, mbox);
1218 
1219 			emlxs_mb_put(hba, mbox);
1220 		}
1221 
1222 		/* Declare the linkup here */
1223 		emlxs_linkup(hba);
1224 	}
1225 
1226 	/* If link not already down then declare it down now */
1227 	else if (la.attType == AT_LINK_DOWN) {
1228 		/* Make sure link is declared down */
1229 		emlxs_linkdown(hba);
1230 	}
1231 
1232 	/* Enable Link attention interrupt */
1233 	mutex_enter(&EMLXS_PORT_LOCK);
1234 
1235 	if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1236 		hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1237 		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1238 #ifdef FMA_SUPPORT
1239 		/* Access handle validation */
1240 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
1241 #endif  /* FMA_SUPPORT */
1242 	}
1243 
1244 	mutex_exit(&EMLXS_PORT_LOCK);
1245 
1246 	return (0);
1247 
1248 } /* emlxs_read_la_mbcmpl() */
1249 
1250 
1251 /* SLI3 */
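/*
 * emlxs_mb_read_la  Issue a READ_LA64 mailbox command; returns 1 if no
 *                   buffer is available
 */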
1252 extern uint32_t
1253 emlxs_mb_read_la(emlxs_hba_t *hba, MAILBOXQ *mbq)
1254 {
1255 	MAILBOX *mb = (MAILBOX *)mbq;
1256 	MATCHMAP *mp;
1257 
1258 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1259 
1260 	if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) {
1261 		mb->mbxCommand = MBX_READ_LA64;
1262 
1263 		return (1);
1264 	}
1265 
1266 	mb->mbxCommand = MBX_READ_LA64;
1267 	mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
1268 	mb->un.varReadLA.un.lilpBde64.addrHigh = PADDR_HI(mp->phys);
1269 	mb->un.varReadLA.un.lilpBde64.addrLow = PADDR_LO(mp->phys);
1270 	mb->mbxOwner = OWN_HOST;
1271 	mbq->mbox_cmpl = emlxs_read_la_mbcmpl;
1272 	mbq->port = (void *)&PPORT;
1273 
1274 	/*
1275 	 * save address for completion
1276 	 */
1277 	mbq->bp = (void *)mp;
1278 
1279 	return (0);
1280 
1281 } /* emlxs_mb_read_la() */
1282 
1283 
1284 /* SLI3 */
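/*
 * emlxs_clear_la_mbcmpl  CLEAR_LA completion handler; re-enables link
 *                        attention interrupts and registers vpi's
 */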
1285 static uint32_t
1286 emlxs_clear_la_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1287 {
1288 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1289 	MAILBOX *mb;
1290 	MAILBOXQ *mbox;
1291 	emlxs_port_t *vport;
1292 	uint32_t la_enable;
1293 	int i, rc;
1294 
1295 	mb = (MAILBOX *)mbq;
1296 	if (mb->mbxStatus) {
1297 		la_enable = 1;
1298 
1299 		if (mb->mbxStatus == 0x1601) {
1300 			/* Get a buffer which will be used for */
1301 			/* mailbox commands */
1302 			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1303 			    MEM_MBOX, 1))) {
1304 				/* Get link attention message */
1305 				if (emlxs_mb_read_la(hba, mbox) == 0) {
1306 					rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
1307 					    (MAILBOX *)mbox, MBX_NOWAIT, 0);
1308 					if ((rc != MBX_BUSY) &&
1309 					    (rc != MBX_SUCCESS)) {
1310 						emlxs_mem_put(hba,
1311 						    MEM_MBOX, (void *)mbox);
1312 					}
1313 					la_enable = 0;
1314 				} else {
1315 					emlxs_mem_put(hba, MEM_MBOX,
1316 					    (void *)mbox);
1317 				}
1318 			}
1319 		}
1320 
1321 		mutex_enter(&EMLXS_PORT_LOCK);
1322 		if (la_enable) {
1323 			if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1324 				/* Enable Link Attention interrupts */
1325 				hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1326 				WRITE_CSR_REG(hba, FC_HC_REG(hba),
1327 				    hba->sli.sli3.hc_copy);
1328 #ifdef FMA_SUPPORT
1329 				/* Access handle validation */
1330 				EMLXS_CHK_ACC_HANDLE(hba,
1331 				    hba->sli.sli3.csr_acc_handle);
1332 #endif  /* FMA_SUPPORT */
1333 			}
1334 		} else {
1335 			if (hba->sli.sli3.hc_copy & HC_LAINT_ENA) {
1336 				/* Disable Link Attention interrupts */
1337 				hba->sli.sli3.hc_copy &= ~HC_LAINT_ENA;
1338 				WRITE_CSR_REG(hba, FC_HC_REG(hba),
1339 				    hba->sli.sli3.hc_copy);
1340 #ifdef FMA_SUPPORT
1341 				/* Access handle validation */
1342 				EMLXS_CHK_ACC_HANDLE(hba,
1343 				    hba->sli.sli3.csr_acc_handle);
1344 #endif  /* FMA_SUPPORT */
1345 			}
1346 		}
1347 		mutex_exit(&EMLXS_PORT_LOCK);
1348 
1349 		return (0);
1350 	}
1351 	/* Enable Link Attention interrupts */
1352 	mutex_enter(&EMLXS_PORT_LOCK);
1353 
1354 	if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1355 		hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1356 		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1357 #ifdef FMA_SUPPORT
1358 		/* Access handle validation */
1359 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
1360 #endif  /* FMA_SUPPORT */
1361 	}
1362 
1363 	if (hba->state >= FC_LINK_UP) {
1364 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_READY);
1365 	}
1366 
1367 	mutex_exit(&EMLXS_PORT_LOCK);
1368 
1369 	/* Adapter is now ready for FCP traffic */
1370 	if (hba->state == FC_READY) {
1371 
1372 		/* Register vpi's for all ports that have did's */
1373 		for (i = 0; i < MAX_VPORTS; i++) {
1374 			vport = &VPORT(i);
1375 
1376 			if (!(vport->flag & EMLXS_PORT_BOUND) ||
1377 			    !(vport->did)) {
1378 				continue;
1379 			}
1380 
1381 			(void) emlxs_mb_reg_vpi(vport, NULL);
1382 		}
1383 
1384 		/* Attempt to send any pending IO */
1385 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[hba->channel_fcp], 0);
1386 	}
1387 	return (0);
1388 
1389 } /* emlxs_clear_la_mbcmpl() */
1390 
1391 
1392 /* SLI3 */
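/*
 * emlxs_mb_clear_la  Issue a CLEAR_LA mailbox command
 */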
1393 extern void
1394 emlxs_mb_clear_la(emlxs_hba_t *hba, MAILBOXQ *mbq)
1395 {
1396 	MAILBOX *mb = (MAILBOX *)mbq;
1397 
1398 #ifdef FC_RPI_CHECK
1399 	emlxs_rpi_check(hba);
1400 #endif	/* FC_RPI_CHECK */
1401 
1402 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1403 
1404 	mb->un.varClearLA.eventTag = hba->link_event_tag;
1405 	mb->mbxCommand = MBX_CLEAR_LA;
1406 	mb->mbxOwner = OWN_HOST;
1407 	mbq->mbox_cmpl = emlxs_clear_la_mbcmpl;
1408 	mbq->port = (void *)&PPORT;
1409 
1410 	return;
1411 
1412 } /* emlxs_mb_clear_la() */
1413 
1414 
1415 /*
1416  * emlxs_mb_read_status  Issue a READ STATUS mailbox command
1417  */
1418 /*ARGSUSED*/
1419 extern void
1420 emlxs_mb_read_status(emlxs_hba_t *hba, MAILBOXQ *mbq)
1421 {
1422 	MAILBOX *mb = (MAILBOX *)mbq;
1423 
1424 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1425 
1426 	mb->mbxCommand = MBX_READ_STATUS;
1427 	mb->mbxOwner = OWN_HOST;
1428 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1429 	mbq->port = (void *)&PPORT;
1430 
1431 } /* emlxs_mb_read_status() */
1432 
1433 
1434 /*
1435  * emlxs_mb_read_lnk_stat  Issue a READ LINK STATUS mailbox command
1436  */
1437 /*ARGSUSED*/
1438 extern void
1439 emlxs_mb_read_lnk_stat(emlxs_hba_t *hba, MAILBOXQ *mbq)
1440 {
1441 	MAILBOX *mb = (MAILBOX *)mbq;
1442 
1443 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1444 
1445 	mb->mbxCommand = MBX_READ_LNK_STAT;
1446 	mb->mbxOwner = OWN_HOST;
1447 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1448 	mbq->port = (void *)&PPORT;
1449 
1450 } /* emlxs_mb_read_lnk_stat() */
1451 
1452 
1453 
1454 
1455 
1456 
1457 /*
1458  * emlxs_mb_config_ring  Issue a CONFIG RING mailbox command
1459  */
1460 extern void
1461 emlxs_mb_config_ring(emlxs_hba_t *hba, int32_t ring, MAILBOXQ *mbq)
1462 {
1463 	MAILBOX *mb = (MAILBOX *)mbq;
1464 	int32_t i;
1465 	int32_t j;
1466 
1467 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1468 
1469 	j = 0;
1470 	for (i = 0; i < ring; i++) {
1471 		j += hba->sli.sli3.ring_masks[i];
1472 	}
1473 
1474 	for (i = 0; i < hba->sli.sli3.ring_masks[ring]; i++) {
1475 		if ((j + i) >= 6) {
1476 			break;
1477 		}
1478 
1479 		mb->un.varCfgRing.rrRegs[i].rval  =
1480 		    hba->sli.sli3.ring_rval[j + i];
1481 		mb->un.varCfgRing.rrRegs[i].rmask =
1482 		    hba->sli.sli3.ring_rmask[j + i];
1483 		mb->un.varCfgRing.rrRegs[i].tval  =
1484 		    hba->sli.sli3.ring_tval[j + i];
1485 		mb->un.varCfgRing.rrRegs[i].tmask =
1486 		    hba->sli.sli3.ring_tmask[j + i];
1487 	}
1488 
1489 	mb->un.varCfgRing.ring = ring;
1490 	mb->un.varCfgRing.profile = 0;
1491 	mb->un.varCfgRing.maxOrigXchg = 0;
1492 	mb->un.varCfgRing.maxRespXchg = 0;
1493 	mb->un.varCfgRing.recvNotify = 1;
1494 	mb->un.varCfgRing.numMask = hba->sli.sli3.ring_masks[ring];
1495 	mb->mbxCommand = MBX_CONFIG_RING;
1496 	mb->mbxOwner = OWN_HOST;
1497 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1498 	mbq->port = (void *)&PPORT;
1499 
1500 	return;
1501 
1502 } /* emlxs_mb_config_ring() */
1503 
1504 
1505 /*
1506  *  emlxs_mb_config_link  Issue a CONFIG LINK mailbox command
1507  */
1508 extern void
1509 emlxs_mb_config_link(emlxs_hba_t *hba, MAILBOXQ *mbq)
1510 {
1511 	MAILBOX	*mb = (MAILBOX *)mbq;
1512 	emlxs_port_t   *port = &PPORT;
1513 	emlxs_config_t *cfg = &CFG;
1514 
1515 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1516 
1517 	/*
1518 	 * NEW_FEATURE SLI-2, Coalescing Response Feature.
1519 	 */
1520 	if (cfg[CFG_CR_DELAY].current) {
1521 		mb->un.varCfgLnk.cr = 1;
1522 		mb->un.varCfgLnk.ci = 1;
1523 		mb->un.varCfgLnk.cr_delay = cfg[CFG_CR_DELAY].current;
1524 		mb->un.varCfgLnk.cr_count = cfg[CFG_CR_COUNT].current;
1525 	}
1526 
1527 	if (cfg[CFG_ACK0].current)
1528 		mb->un.varCfgLnk.ack0_enable = 1;
1529 
1530 	mb->un.varCfgLnk.myId = port->did;
1531 	mb->un.varCfgLnk.edtov = hba->fc_edtov;
1532 	mb->un.varCfgLnk.arbtov = hba->fc_arbtov;
1533 	mb->un.varCfgLnk.ratov = hba->fc_ratov;
1534 	mb->un.varCfgLnk.rttov = hba->fc_rttov;
1535 	mb->un.varCfgLnk.altov = hba->fc_altov;
1536 	mb->un.varCfgLnk.crtov = hba->fc_crtov;
1537 	mb->un.varCfgLnk.citov = hba->fc_citov;
1538 	mb->mbxCommand = MBX_CONFIG_LINK;
1539 	mb->mbxOwner = OWN_HOST;
1540 	mbq->mbox_cmpl = NULL;
1541 	mbq->port = (void *)port;
1542 
1543 	return;
1544 
1545 } /* emlxs_mb_config_link() */
1546 
1547 
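/*
 * emlxs_init_link_mbcmpl  INIT_LINK completion handler; retries failed
 *                         MBX_NOWAIT requests with auto-speed selected
 */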
1548 static uint32_t
1549 emlxs_init_link_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1550 {
1551 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1552 	emlxs_config_t	*cfg = &CFG;
1553 	MAILBOX *mb;
1554 
1555 	mb = (MAILBOX *)mbq;
1556 	if (mb->mbxStatus) {
1557 		if ((hba->flag & FC_SLIM2_MODE) &&
1558 		    (hba->mbox_queue_flag == MBX_NOWAIT)) {
1559 			/* Retry only MBX_NOWAIT requests */
1560 
1561 			if ((cfg[CFG_LINK_SPEED].current > 0) &&
1562 			    ((mb->mbxStatus == 0x0011) ||
1563 			    (mb->mbxStatus == 0x0500))) {
1564 
1565 				EMLXS_MSGF(EMLXS_CONTEXT,
1566 				    &emlxs_mbox_event_msg,
1567 				    "Retrying.  %s: status=%x. Auto-speed set.",
1568 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
1569 				    (uint32_t)mb->mbxStatus);
1570 
1571 				mb->un.varInitLnk.link_flags &=
1572 				    ~FLAGS_LINK_SPEED;
1573 				mb->un.varInitLnk.link_speed = 0;
1574 
1575 				emlxs_mb_retry(hba, mbq);
1576 				return (1);
1577 			}
1578 		}
1579 	}
1580 	return (0);
1581 
1582 } /* emlxs_init_link_mbcmpl() */
1583 
1584 
1585 /*
1586  *  emlxs_mb_init_link  Issue an INIT LINK mailbox command
1587  */
1588 extern void
1589 emlxs_mb_init_link(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t topology,
1590     uint32_t linkspeed)
1591 {
1592 	MAILBOX *mb = (MAILBOX *)mbq;
1593 	emlxs_vpd_t	*vpd = &VPD;
1594 	emlxs_config_t	*cfg = &CFG;
1595 
1596 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1597 		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
1598 		mbq->nonembed = NULL;
1599 		mbq->mbox_cmpl = NULL; /* no cmpl needed */
1600 		mbq->port = (void *)&PPORT;
1601 
1602 		mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK;
1603 		mb->mbxOwner = OWN_HOST;
1604 		return;
1605 	}
1606 
1607 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1608 
1609 	switch (topology) {
1610 	case FLAGS_LOCAL_LB:
1611 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
1612 		mb->un.varInitLnk.link_flags |= FLAGS_LOCAL_LB;
1613 		break;
1614 	case FLAGS_TOPOLOGY_MODE_LOOP_PT:
1615 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
1616 		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
1617 		break;
1618 	case FLAGS_TOPOLOGY_MODE_PT_PT:
1619 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
1620 		break;
1621 	case FLAGS_TOPOLOGY_MODE_LOOP:
1622 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
1623 		break;
1624 	case FLAGS_TOPOLOGY_MODE_PT_LOOP:
1625 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
1626 		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
1627 		break;
1628 	}
1629 
1630 	if (cfg[CFG_LILP_ENABLE].current == 0) {
1631 		/* Disable LIRP/LILP support */
1632 		mb->un.varInitLnk.link_flags |= FLAGS_LIRP_LILP;
1633 	}
1634 
1635 	/*
1636 	 * Setting up the link speed
1637 	 */
1638 	switch (linkspeed) {
1639 	case 0:
1640 		break;
1641 
1642 	case 1:
1643 		if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
1644 			linkspeed = 0;
1645 		}
1646 		break;
1647 
1648 	case 2:
1649 		if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
1650 			linkspeed = 0;
1651 		}
1652 		break;
1653 
1654 	case 4:
1655 		if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
1656 			linkspeed = 0;
1657 		}
1658 		break;
1659 
1660 	case 8:
1661 		if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
1662 			linkspeed = 0;
1663 		}
1664 		break;
1665 
1666 	case 10:
1667 		if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
1668 			linkspeed = 0;
1669 		}
1670 		break;
1671 
1672 	default:
1673 		linkspeed = 0;
1674 		break;
1675 
1676 	}
1677 
1678 	if ((linkspeed > 0) && (vpd->feaLevelHigh >= 0x02)) {
1679 		mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
1680 		mb->un.varInitLnk.link_speed = linkspeed;
1681 	}
1682 
1683 	mb->un.varInitLnk.link_flags |= FLAGS_PREABORT_RETURN;
1684 
1685 	mb->un.varInitLnk.fabric_AL_PA =
1686 	    (uint8_t)cfg[CFG_ASSIGN_ALPA].current;
1687 	mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK;
1688 	mb->mbxOwner = OWN_HOST;
1689 	mbq->mbox_cmpl = emlxs_init_link_mbcmpl;
1690 	mbq->port = (void *)&PPORT;
1691 
1692 
1693 	return;
1694 
1695 } /* emlxs_mb_init_link() */
1696 
1697 
1698 /*
1699  *  emlxs_mb_down_link  Issue a DOWN LINK mailbox command
1700  */
1701 /*ARGSUSED*/
1702 extern void
1703 emlxs_mb_down_link(emlxs_hba_t *hba, MAILBOXQ *mbq)
1704 {
1705 	MAILBOX *mb = (MAILBOX *)mbq;
1706 
1707 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1708 
1709 	mb->mbxCommand = MBX_DOWN_LINK;
1710 	mb->mbxOwner = OWN_HOST;
1711 	mbq->mbox_cmpl = NULL;
1712 	mbq->port = (void *)&PPORT;
1713 
1714 	return;
1715 
1716 } /* emlxs_mb_down_link() */
1717 
1718 
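/*
 * emlxs_read_sparam_mbcmpl  READ_SPARM64 completion handler; saves the
 *                           service parameters and WWNs for all ports
 */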
1719 static uint32_t
1720 emlxs_read_sparam_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1721 {
1722 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1723 	MAILBOX *mb;
1724 	MATCHMAP *mp;
1725 	emlxs_port_t *vport;
1726 	int32_t i;
1727 	uint32_t  control;
1728 	uint8_t null_wwn[8];
1729 
1730 	mb = (MAILBOX *)mbq;
1731 	if (mb->mbxStatus) {
1732 		if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
1733 			control = mb->un.varRdSparm.un.sp64.tus.f.bdeSize;
1734 			if (control == 0) {
1735 				(void) emlxs_mb_read_sparam(hba, mbq);
1736 			}
1737 			emlxs_mb_retry(hba, mbq);
1738 			return (1);
1739 		}
1740 		return (0);
1741 	}
1742 	mp = (MATCHMAP *)mbq->bp;
1743 	if (!mp) {
1744 		return (0);
1745 	}
1746 
1747 	bcopy((caddr_t)mp->virt, (caddr_t)&hba->sparam, sizeof (SERV_PARM));
1748 
1749 	/* Initialize the node name and port name only once */
1750 	bzero(null_wwn, 8);
1751 	if ((bcmp((caddr_t)&hba->wwnn, (caddr_t)null_wwn, 8) == 0) &&
1752 	    (bcmp((caddr_t)&hba->wwpn, (caddr_t)null_wwn, 8) == 0)) {
1753 		bcopy((caddr_t)&hba->sparam.nodeName,
1754 		    (caddr_t)&hba->wwnn, sizeof (NAME_TYPE));
1755 
1756 		bcopy((caddr_t)&hba->sparam.portName,
1757 		    (caddr_t)&hba->wwpn, sizeof (NAME_TYPE));
1758 	} else {
1759 		bcopy((caddr_t)&hba->wwnn,
1760 		    (caddr_t)&hba->sparam.nodeName, sizeof (NAME_TYPE));
1761 
1762 		bcopy((caddr_t)&hba->wwpn,
1763 		    (caddr_t)&hba->sparam.portName, sizeof (NAME_TYPE));
1764 	}
1765 
1766 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1767 	    "SPARAM: EDTOV hba=%x mbox_csp=%x BBC=%x",
1768 	    hba->fc_edtov, hba->sparam.cmn.e_d_tov,
1769 	    hba->sparam.cmn.bbCreditlsb);
1770 
1771 	hba->sparam.cmn.e_d_tov = hba->fc_edtov;
1772 
1773 	/* Initialize the physical port */
1774 	bcopy((caddr_t)&hba->sparam,
1775 	    (caddr_t)&port->sparam, sizeof (SERV_PARM));
1776 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&port->wwpn,
1777 	    sizeof (NAME_TYPE));
1778 	bcopy((caddr_t)&hba->wwnn, (caddr_t)&port->wwnn,
1779 	    sizeof (NAME_TYPE));
1780 
1781 	/* Initialize the virtual ports */
1782 	for (i = 1; i < MAX_VPORTS; i++) {
1783 		vport = &VPORT(i);
1784 		if (vport->flag & EMLXS_PORT_BOUND) {
1785 			continue;
1786 		}
1787 
1788 		bcopy((caddr_t)&hba->sparam,
1789 		    (caddr_t)&vport->sparam,
1790 		    sizeof (SERV_PARM));
1791 
1792 		bcopy((caddr_t)&vport->wwnn,
1793 		    (caddr_t)&vport->sparam.nodeName,
1794 		    sizeof (NAME_TYPE));
1795 
1796 		bcopy((caddr_t)&vport->wwpn,
1797 		    (caddr_t)&vport->sparam.portName,
1798 		    sizeof (NAME_TYPE));
1799 	}
1800 
1801 	return (0);
1802 
1803 } /* emlxs_read_sparam_mbcmpl() */
1804 
1805 
1806 /*
1807  * emlxs_mb_read_sparam  Issue a READ SPARAM mailbox command
1808  */
1809 extern uint32_t
1810 emlxs_mb_read_sparam(emlxs_hba_t *hba, MAILBOXQ *mbq)
1811 {
1812 	MAILBOX *mb = (MAILBOX *)mbq;
1813 	MATCHMAP *mp;
1814 
1815 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1816 
1817 	if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) {
1818 		mb->mbxCommand = MBX_READ_SPARM64;
1819 
1820 		return (1);
1821 	}
1822 
1823 	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM);
1824 	mb->un.varRdSparm.un.sp64.addrHigh = PADDR_HI(mp->phys);
1825 	mb->un.varRdSparm.un.sp64.addrLow = PADDR_LO(mp->phys);
1826 	mb->mbxCommand = MBX_READ_SPARM64;
1827 	mb->mbxOwner = OWN_HOST;
1828 	mbq->mbox_cmpl = emlxs_read_sparam_mbcmpl;
1829 	mbq->port = (void *)&PPORT;
1830 
1831 	/*
1832 	 * save address for completion
1833 	 */
1834 	mbq->bp = (void *)mp;
1835 
1836 	return (0);
1837 
1838 } /* emlxs_mb_read_sparam() */
1839 
1840 
1841 /*
1842  * emlxs_mb_read_rpi    Issue a READ RPI mailbox command
1843  */
1844 /*ARGSUSED*/
1845 extern uint32_t
1846 emlxs_mb_read_rpi(emlxs_hba_t *hba, uint32_t rpi, MAILBOXQ *mbq,
1847     uint32_t flag)
1848 {
1849 	MAILBOX *mb = (MAILBOX *)mbq;
1850 
1851 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1852 
1853 	/*
1854 	 * Set flag to issue action on cmpl
1855 	 */
1856 	mb->un.varWords[30] = flag;
1857 	mb->un.varRdRPI.reqRpi = (volatile uint16_t) rpi;
1858 	mb->mbxCommand = MBX_READ_RPI64;
1859 	mb->mbxOwner = OWN_HOST;
1860 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1861 	mbq->port = (void *)&PPORT;
1862 
1863 	return (0);
1864 } /* emlxs_mb_read_rpi() */
1865 
1866 
1867 /*
1868  * emlxs_mb_read_xri    Issue a READ XRI mailbox command
1869  */
1870 /*ARGSUSED*/
1871 extern uint32_t
1872 emlxs_mb_read_xri(emlxs_hba_t *hba, uint32_t xri, MAILBOXQ *mbq,
1873     uint32_t flag)
1874 {
1875 	MAILBOX *mb = (MAILBOX *)mbq;
1876 
1877 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1878 
1879 	/*
1880 	 * Set flag to issue action on cmpl
1881 	 */
1882 	mb->un.varWords[30] = flag;
1883 	mb->un.varRdXRI.reqXri = (volatile uint16_t)xri;
1884 	mb->mbxCommand = MBX_READ_XRI;
1885 	mb->mbxOwner = OWN_HOST;
1886 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1887 	mbq->port = (void *)&PPORT;
1888 
1889 	return (0);
1890 } /* emlxs_mb_read_xri() */
1891 
1892 
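/*
 * emlxs_mb_check_sparm  Validate a node's service parameters
 */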
1893 /*ARGSUSED*/
1894 extern int32_t
1895 emlxs_mb_check_sparm(emlxs_hba_t *hba, SERV_PARM *nsp)
1896 {
1897 	uint32_t nsp_value;
1898 	uint32_t *iptr;
1899 
1900 	if (nsp->cmn.fPort) {
1901 		return (0);
1902 	}
1903 
1904 	/* Validate the service parameters */
1905 	iptr = (uint32_t *)&nsp->portName;
1906 	if (iptr[0] == 0 && iptr[1] == 0) {
1907 		return (1);
1908 	}
1909 
1910 	iptr = (uint32_t *)&nsp->nodeName;
1911 	if (iptr[0] == 0 && iptr[1] == 0) {
1912 		return (2);
1913 	}
1914 
1915 	if (nsp->cls2.classValid) {
1916 		nsp_value =
1917 		    ((nsp->cls2.rcvDataSizeMsb & 0x0f) << 8) | nsp->cls2.
1918 		    rcvDataSizeLsb;
1919 
1920 		/* If the receive data length is zero then set it to */
1921 		/* the CSP value */
1922 		if (!nsp_value) {
1923 			nsp->cls2.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
1924 			nsp->cls2.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
1925 			return (0);
1926 		}
1927 	}
1928 
1929 	if (nsp->cls3.classValid) {
1930 		nsp_value =
1931 		    ((nsp->cls3.rcvDataSizeMsb & 0x0f) << 8) | nsp->cls3.
1932 		    rcvDataSizeLsb;
1933 
1934 		/* If the receive data length is zero then set it to */
1935 		/* the CSP value */
1936 		if (!nsp_value) {
1937 			nsp->cls3.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
1938 			nsp->cls3.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
1939 			return (0);
1940 		}
1941 	}
1942 
1943 	return (0);
1944 
1945 } /* emlxs_mb_check_sparm() */
1946 
1947 
1948 /* SLI3 */
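/*
 * emlxs_reg_did_mbcmpl  REG_LOGIN64 completion handler; creates or
 *                       updates the node for the registered did
 */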
1949 static uint32_t
1950 emlxs_reg_did_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1951 {
1952 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1953 	MAILBOX *mb;
1954 	MATCHMAP *mp;
1955 	NODELIST *ndlp;
1956 	emlxs_port_t *vport;
1957 	SERV_PARM *sp;
1958 	int32_t i;
1959 	uint32_t  control;
1960 	uint32_t ldata;
1961 	uint32_t ldid;
1962 	uint16_t lrpi;
1963 	uint16_t lvpi;
1964 
1965 	mb = (MAILBOX *)mbq;
1966 
1967 	if (mb->mbxStatus) {
1968 		if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
1969 			control = mb->un.varRegLogin.un.sp.bdeSize;
1970 			if (control == 0) {
1971 				/* Special handle for vport PLOGI */
1972 				if (mbq->iocbq == (uint8_t *)1) {
1973 					mbq->iocbq = NULL;
1974 				}
1975 				return (0);
1976 			}
1977 			emlxs_mb_retry(hba, mbq);
1978 			return (1);
1979 		}
1980 		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1981 			EMLXS_MSGF(EMLXS_CONTEXT,
1982 			    &emlxs_node_create_failed_msg,
1983 			    "Limit reached. count=%d", port->node_count);
1984 		}
1985 
1986 		/* Special handling for vport PLOGI */
1987 		if (mbq->iocbq == (uint8_t *)1) {
1988 			mbq->iocbq = NULL;
1989 		}
1990 
1991 		return (0);
1992 	}
1993 
1994 	mp = (MATCHMAP *)mbq->bp;
1995 	if (!mp) {
1996 		return (0);
1997 	}
1998 
1999 	ldata = mb->un.varWords[5];
2000 	lvpi = (ldata & 0xffff) - hba->vpi_base;
2001 	port = &VPORT(lvpi);
2002 
2003 	/* First copy command data */
2004 	ldata = mb->un.varWords[0];	/* get rpi */
2005 	lrpi = ldata & 0xffff;
2006 
2007 	ldata = mb->un.varWords[1];	/* get did */
2008 	ldid = ldata & MASK_DID;
2009 
2010 	sp = (SERV_PARM *)mp->virt;
2011 
2012 	/* Create or update the node */
2013 	ndlp = emlxs_node_create(port, ldid, lrpi, sp);
2014 
2015 	if (ndlp->nlp_DID == FABRIC_DID) {
2016 		/* FLOGI/FDISC successfully completed on this port */
2017 		mutex_enter(&EMLXS_PORT_LOCK);
2018 		port->flag |= EMLXS_PORT_FLOGI_CMPL;
2019 		mutex_exit(&EMLXS_PORT_LOCK);
2020 
2021 		/* If CLEAR_LA has been sent, then attempt to */
2022 		/* register the vpi now */
2023 		if (hba->state == FC_READY) {
2024 			(void) emlxs_mb_reg_vpi(port, NULL);
2025 		}
2026 
2027 		/*
2028 		 * If NPIV Fabric support has just been established on
2029 		 * the physical port, then notify the vports of the
2030 		 * link up
2031 		 */
2032 		if ((lvpi == 0) &&
2033 		    (hba->flag & FC_NPIV_ENABLED) &&
2034 		    (hba->flag & FC_NPIV_SUPPORTED)) {
2035 			/* Skip the physical port */
2036 			for (i = 1; i < MAX_VPORTS; i++) {
2037 				vport = &VPORT(i);
2038 
2039 				if (!(vport->flag & EMLXS_PORT_BOUND) ||
2040 				    !(vport->flag &
2041 				    EMLXS_PORT_ENABLE)) {
2042 					continue;
2043 				}
2044 
2045 				emlxs_port_online(vport);
2046 			}
2047 		}
2048 	}
2049 
2050 	/* Check for special restricted login flag */
2051 	if (mbq->iocbq == (uint8_t *)1) {
2052 		mbq->iocbq = NULL;
2053 		(void) emlxs_mb_unreg_node(port, ndlp, NULL, NULL, NULL);
2054 		return (0);
2055 	}
2056 
2057 	/* Needed for FCT trigger in emlxs_mb_deferred_cmpl */
2058 	if (mbq->sbp) {
2059 		((emlxs_buf_t *)mbq->sbp)->node = ndlp;
2060 	}
2061 
2062 #ifdef DHCHAP_SUPPORT
2063 	if (mbq->sbp || mbq->ubp) {
2064 		if (emlxs_dhc_auth_start(port, ndlp, mbq->sbp,
2065 		    mbq->ubp) == 0) {
2066 			/* Auth started - auth completion will */
2067 			/* handle sbp and ubp now */
2068 			mbq->sbp = NULL;
2069 			mbq->ubp = NULL;
2070 		}
2071 	}
2072 #endif	/* DHCHAP_SUPPORT */
2073 
2074 	return (0);
2075 
2076 } /* emlxs_reg_did_mbcmpl() */
2077 
2078 
2079 /* SLI3 */
2080 extern uint32_t
2081 emlxs_mb_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
2082     emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
2083 {
2084 	emlxs_hba_t	*hba = HBA;
2085 	MATCHMAP	*mp;
2086 	MAILBOXQ	*mbq;
2087 	MAILBOX		*mb;
2088 	uint32_t	rval;
2089 
2090 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2091 		rval = emlxs_sli4_reg_did(port, did, param, sbp, ubp, iocbq);
2092 		return (rval);
2093 	}
2094 
2095 	/* Check for invalid node ids to register */
2096 	if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
2097 		return (1);
2098 	}
2099 
2100 	if (did & 0xff000000) {
2101 		return (1);
2102 	}
2103 
2104 	if ((rval = emlxs_mb_check_sparm(hba, param))) {
2105 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
2106 		    "Invalid service parameters. did=%06x rval=%d", did,
2107 		    rval);
2108 
2109 		return (1);
2110 	}
2111 
2112 	/* Check if the node limit has been reached */
2113 	if (port->node_count >= hba->max_nodes) {
2114 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
2115 		    "Limit reached. did=%06x count=%d", did,
2116 		    port->node_count);
2117 
2118 		return (1);
2119 	}
2120 
2121 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
2122 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
2123 		    "Unable to allocate mailbox. did=%x", did);
2124 
2125 		return (1);
2126 	}
2127 	mb = (MAILBOX *)mbq->mbox;
2128 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2129 
2130 	/* Build login request */
2131 	if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) {
2132 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2133 
2134 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
2135 		    "Unable to allocate buffer. did=%x", did);
2136 		return (1);
2137 	}
2138 	bcopy((void *)param, (void *)mp->virt, sizeof (SERV_PARM));
2139 
2140 	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM);
2141 	mb->un.varRegLogin.un.sp64.addrHigh = PADDR_HI(mp->phys);
2142 	mb->un.varRegLogin.un.sp64.addrLow = PADDR_LO(mp->phys);
2143 	mb->un.varRegLogin.did = did;
2144 	mb->un.varWords[30] = 0;	/* flags */
2145 	mb->mbxCommand = MBX_REG_LOGIN64;
2146 	mb->mbxOwner = OWN_HOST;
2147 	mb->un.varRegLogin.vpi = port->vpi;
2148 	mb->un.varRegLogin.rpi = 0;
2149 
2150 	mbq->sbp = (void *)sbp;
2151 	mbq->ubp = (void *)ubp;
2152 	mbq->iocbq = (void *)iocbq;
2153 	mbq->bp = (void *)mp;
2154 	mbq->mbox_cmpl = emlxs_reg_did_mbcmpl;
2155 	mbq->context = NULL;
2156 	mbq->port = (void *)port;
2157 
2158 	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
2159 	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
2160 		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
2161 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2162 
2163 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
2164 		    "Unable to send mbox. did=%x", did);
2165 		return (1);
2166 	}
2167 
2168 	return (0);
2169 
2170 } /* emlxs_mb_reg_did() */
2171 
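/*
 * Illustrative sketch only: a hypothetical caller registering a remote
 * port's DID once its service parameters are in hand, with no deferred
 * packet, unsolicited buffer or IOCB to trigger on completion. The
 * wrapper name and the EMLXS_MBOX_EXAMPLES guard are not part of the
 * driver.
 */
#ifdef EMLXS_MBOX_EXAMPLES
static uint32_t
emlxs_example_reg_node(emlxs_port_t *port, uint32_t did, SERV_PARM *sp)
{
	/* Non-zero means the REG_LOGIN could not be built or queued */
	return (emlxs_mb_reg_did(port, did, sp, NULL, NULL, NULL));
}
#endif	/* EMLXS_MBOX_EXAMPLES */
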
2172 /* SLI3 */
2173 /*ARGSUSED*/
2174 static uint32_t
2175 emlxs_unreg_node_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2176 {
2177 	emlxs_port_t	*port = (emlxs_port_t *)mbq->port;
2178 	MAILBOX		*mb;
2179 	NODELIST	*node;
2180 	uint16_t	rpi;
2181 
2182 	node = (NODELIST *)mbq->context;
2183 	mb = (MAILBOX *)mbq;
2184 	rpi = (node)? node->nlp_Rpi:0xffff;
2185 
2186 	if (mb->mbxStatus) {
2187 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2188 		    "unreg_node_mbcmpl:failed. node=%p rpi=%x status=%x",
2189 		    node, rpi, mb->mbxStatus);
2190 
2191 		return (0);
2192 	}
2193 
2194 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2195 	    "unreg_node_mbcmpl: node=%p rpi=%x",
2196 	    node, rpi);
2197 
2198 	if (node) {
2199 		emlxs_node_rm(port, node);
2200 
2201 	} else {  /* All nodes */
2202 		emlxs_node_destroy_all(port);
2203 	}
2204 
2205 	return (0);
2206 
2207 } /* emlxs_unreg_node_mbcmpl() */
2208 
2209 
2210 /* SLI3 */
2211 extern uint32_t
2212 emlxs_mb_unreg_node(emlxs_port_t *port, NODELIST *node, emlxs_buf_t *sbp,
2213     fc_unsol_buf_t *ubp, IOCBQ *iocbq)
2214 {
2215 	emlxs_hba_t	*hba = HBA;
2216 	MAILBOXQ	*mbq;
2217 	MAILBOX		*mb;
2218 	uint16_t	rpi;
2219 	uint32_t	rval;
2220 
2221 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2222 		rval = emlxs_sli4_unreg_node(port, node, sbp, ubp, iocbq);
2223 		return (rval);
2224 	}
2225 
2226 	if (node) {
2227 		/* Check for base node */
2228 		if (node == &port->node_base) {
2229 			/* just flush base node */
2230 			(void) emlxs_tx_node_flush(port, &port->node_base,
2231 			    0, 0, 0);
2232 			(void) emlxs_chipq_node_flush(port, 0,
2233 			    &port->node_base, 0);
2234 
2235 			port->did = 0;
2236 
2237 			/* Return now */
2238 			return (1);
2239 		}
2240 
2241 		rpi = (uint16_t)node->nlp_Rpi;
2242 
2243 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2244 		    "unreg_node:%p  rpi=%d", node, rpi);
2245 
2246 		/* This must be the node (0xFFFFFE) registered by a vport */
2247 		if (rpi == 0) {
2248 			emlxs_node_rm(port, node);
2249 			return (0);
2250 		}
2251 
2252 	} else {	/* Unreg all nodes */
2253 		rpi = 0xffff;
2254 
2255 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2256 		    "unreg_node: All");
2257 	}
2258 
2259 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
2260 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2261 		    "unreg_node:failed. Unable to allocate mbox");
2262 		return (1);
2263 	}
2264 
2265 	mb = (MAILBOX *)mbq->mbox;
2266 	mb->un.varUnregLogin.rpi = rpi;
2267 	mb->un.varUnregLogin.vpi = port->VPIobj.VPI;
2268 
2269 	mb->mbxCommand = MBX_UNREG_LOGIN;
2270 	mb->mbxOwner = OWN_HOST;
2271 	mbq->sbp = (void *)sbp;
2272 	mbq->ubp = (void *)ubp;
2273 	mbq->iocbq = (void *)iocbq;
2274 	mbq->mbox_cmpl = emlxs_unreg_node_mbcmpl;
2275 	mbq->context = (void *)node;
2276 	mbq->port = (void *)port;
2277 
2278 	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
2279 	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
2280 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2281 		    "unreg_node:failed. Unable to send request.");
2282 
2283 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2284 		return (1);
2285 	}
2286 
2287 	return (0);
2288 
2289 } /* emlxs_mb_unreg_node() */
2290 
2291 
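/*
 * Illustrative sketch only: passing a NULL node to emlxs_mb_unreg_node()
 * above selects the "unreg all nodes" path (rpi 0xffff). The wrapper
 * name and the EMLXS_MBOX_EXAMPLES guard are hypothetical.
 */
#ifdef EMLXS_MBOX_EXAMPLES
static uint32_t
emlxs_example_unreg_all_nodes(emlxs_port_t *port)
{
	/* NULL node, and no deferred pkt/ub/iocb to complete afterwards */
	return (emlxs_mb_unreg_node(port, NULL, NULL, NULL, NULL));
}
#endif	/* EMLXS_MBOX_EXAMPLES */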
2292 
2293 
2294 /*
2295  * emlxs_mb_set_var   Issue a special debug mbox command to write SLIM
2296  */
2297 /*ARGSUSED*/
2298 extern void
2299 emlxs_mb_set_var(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t addr,
2300     uint32_t value)
2301 {
2302 	MAILBOX *mb = (MAILBOX *)mbq;
2303 
2304 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2305 
2306 	/* addr = 0x090597 is AUTO ABTS disable for ELS commands */
2307 	/* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
2308 	/* addr = 0x100506 is for setting PCI MAX READ value */
2309 
2310 	/*
2311 	 * Always turn on DELAYED ABTS for ELS timeouts
2312 	 */
2313 	if ((addr == 0x052198) && (value == 0)) {
2314 		value = 1;
2315 	}
2316 
2317 	mb->un.varWords[0] = addr;
2318 	mb->un.varWords[1] = value;
2319 	mb->mbxCommand = MBX_SET_VARIABLE;
2320 	mb->mbxOwner = OWN_HOST;
2321 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
2322 	mbq->port = (void *)&PPORT;
2323 
2324 } /* emlxs_mb_set_var() */
2325 
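/*
 * Illustrative sketch only: builds the SET_VARIABLE command for the
 * DELAYED ABTS address documented above (0x052198) and issues it
 * without waiting. The wrapper name and the EMLXS_MBOX_EXAMPLES guard
 * are hypothetical.
 */
#ifdef EMLXS_MBOX_EXAMPLES
static uint32_t
emlxs_example_enable_delayed_abts(emlxs_hba_t *hba)
{
	MAILBOXQ *mbq;
	uint32_t rval;

	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
		return (1);
	}

	/* A zero value would be promoted to 1 by emlxs_mb_set_var() */
	emlxs_mb_set_var(hba, mbq, 0x052198, 1);

	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		return (1);
	}

	return (0);
}
#endif	/* EMLXS_MBOX_EXAMPLES */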
2326 
2327 /*
2328  * Disable Traffic Cop
2329  */
2330 /*ARGSUSED*/
2331 extern void
2332 emlxs_disable_tc(emlxs_hba_t *hba, MAILBOXQ *mbq)
2333 {
2334 	MAILBOX *mb = (MAILBOX *)mbq;
2335 
2336 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2337 
2338 	mb->un.varWords[0] = 0x50797;
2339 	mb->un.varWords[1] = 0;
2340 	mb->un.varWords[2] = 0xfffffffe;
2341 	mb->mbxCommand = MBX_SET_VARIABLE;
2342 	mb->mbxOwner = OWN_HOST;
2343 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
2344 	mbq->port = (void *)&PPORT;
2345 
2346 } /* emlxs_disable_tc() */
2347 
2348 
2349 extern void
2350 emlxs_mb_config_hbq(emlxs_hba_t *hba, MAILBOXQ *mbq, int hbq_id)
2351 {
2352 	HBQ_INIT_t	*hbq;
2353 	MAILBOX		*mb = (MAILBOX *)mbq;
2354 	int		i;
2355 
2356 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2357 
2358 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
2359 
2360 	mb->un.varCfgHbq.hbqId = hbq_id;
2361 	mb->un.varCfgHbq.numEntries = hbq->HBQ_numEntries;
2362 	mb->un.varCfgHbq.recvNotify = hbq->HBQ_recvNotify;
2363 	mb->un.varCfgHbq.numMask = hbq->HBQ_num_mask;
2364 	mb->un.varCfgHbq.profile = hbq->HBQ_profile;
2365 	mb->un.varCfgHbq.ringMask = hbq->HBQ_ringMask;
2366 	mb->un.varCfgHbq.headerLen = hbq->HBQ_headerLen;
2367 	mb->un.varCfgHbq.logEntry = hbq->HBQ_logEntry;
2368 	mb->un.varCfgHbq.hbqaddrLow = PADDR_LO(hbq->HBQ_host_buf.phys);
2369 	mb->un.varCfgHbq.hbqaddrHigh = PADDR_HI(hbq->HBQ_host_buf.phys);
2370 	mb->mbxCommand = MBX_CONFIG_HBQ;
2371 	mb->mbxOwner = OWN_HOST;
2372 	mbq->mbox_cmpl = NULL;
2373 	mbq->port = (void *)&PPORT;
2374 
2375 	/* Copy info for profiles 2, 3 and 5; reserved for other profiles */
2376 	if ((hbq->HBQ_profile == 2) || (hbq->HBQ_profile == 3) ||
2377 	    (hbq->HBQ_profile == 5)) {
2378 		bcopy(&hbq->profiles.allprofiles,
2379 		    &mb->un.varCfgHbq.profiles.allprofiles,
2380 		    sizeof (hbq->profiles));
2381 	}
2382 
2383 	/* Return if no rctl / type masks for this HBQ */
2384 	if (!hbq->HBQ_num_mask) {
2385 		return;
2386 	}
2387 
2388 	/* Otherwise we set up specific rctl / type masks for this HBQ */
2389 	for (i = 0; i < hbq->HBQ_num_mask; i++) {
2390 		mb->un.varCfgHbq.hbqMasks[i].tmatch =
2391 		    hbq->HBQ_Masks[i].tmatch;
2392 		mb->un.varCfgHbq.hbqMasks[i].tmask = hbq->HBQ_Masks[i].tmask;
2393 		mb->un.varCfgHbq.hbqMasks[i].rctlmatch =
2394 		    hbq->HBQ_Masks[i].rctlmatch;
2395 		mb->un.varCfgHbq.hbqMasks[i].rctlmask =
2396 		    hbq->HBQ_Masks[i].rctlmask;
2397 	}
2398 
2399 	return;
2400 
2401 } /* emlxs_mb_config_hbq() */
2402 
2403 
2404 /* SLI3 */
2405 static uint32_t
2406 emlxs_reg_vpi_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2407 {
2408 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2409 	MAILBOX *mb;
2410 
2411 	mb = (MAILBOX *)mbq;
2412 
2413 	mutex_enter(&EMLXS_PORT_LOCK);
2414 
2415 	if (mb->mbxStatus != MBX_SUCCESS) {
2416 		port->flag &= ~EMLXS_PORT_REG_VPI;
2417 		mutex_exit(&EMLXS_PORT_LOCK);
2418 
2419 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2420 		    "cmpl_reg_vpi:%d failed. status=%x",
2421 		    port->vpi, mb->mbxStatus);
2422 		return (0);
2423 	}
2424 
2425 	port->flag |= EMLXS_PORT_REG_VPI_CMPL;
2426 
2427 	mutex_exit(&EMLXS_PORT_LOCK);
2428 
2429 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2430 	    "cmpl_reg_vpi:%d ",
2431 	    port->vpi);
2432 
2433 	return (0);
2434 
2435 } /* emlxs_reg_vpi_mbcmpl() */
2436 
2437 
2438 /* SLI3 */
2439 extern uint32_t
2440 emlxs_mb_reg_vpi(emlxs_port_t *port, emlxs_buf_t *sbp)
2441 {
2442 	emlxs_hba_t *hba = HBA;
2443 	MAILBOXQ *mbq;
2444 	MAILBOX	*mb;
2445 	int rval;
2446 
2447 	if (hba->sli_mode > EMLXS_HBA_SLI3_MODE) {
2448 		return (1);
2449 	}
2450 
2451 	if (!(hba->flag & FC_NPIV_ENABLED)) {
2452 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2453 		    "reg_vpi:%d failed. NPIV disabled.",
2454 		    port->vpi);
2455 		return (1);
2456 	}
2457 
2458 	if (port->flag & EMLXS_PORT_REG_VPI) {
2459 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2460 		    "reg_vpi:%d failed. Already registered.",
2461 		    port->vpi);
2462 		return (0);
2463 	}
2464 
2465 	mutex_enter(&EMLXS_PORT_LOCK);
2466 
2467 	/* Can't reg vpi until ClearLA is sent */
2468 	if (hba->state != FC_READY) {
2469 		mutex_exit(&EMLXS_PORT_LOCK);
2470 
2471 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2472 		    "reg_vpi:%d failed. HBA state not READY",
2473 		    port->vpi);
2474 		return (1);
2475 	}
2476 
2477 	/* Must have port id */
2478 	if (!port->did) {
2479 		mutex_exit(&EMLXS_PORT_LOCK);
2480 
2481 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2482 		    "reg_vpi:%d failed. Port did=0",
2483 		    port->vpi);
2484 		return (1);
2485 	}
2486 
2487 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
2488 		mutex_exit(&EMLXS_PORT_LOCK);
2489 
2490 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2491 		    "reg_vpi:%d failed. Unable to allocate mbox.",
2492 		    port->vpi);
2493 		return (1);
2494 	}
2495 
2496 	port->flag |= EMLXS_PORT_REG_VPI;
2497 
2498 	mutex_exit(&EMLXS_PORT_LOCK);
2499 
2500 	mb = (MAILBOX *)mbq->mbox;
2501 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2502 
2503 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2504 	    "reg_vpi:%d", port->vpi);
2505 
2506 	mb->un.varRegVpi.vpi = port->vpi;
2507 	mb->un.varRegVpi.sid = port->did;
2508 	mb->mbxCommand = MBX_REG_VPI;
2509 	mb->mbxOwner = OWN_HOST;
2510 
2511 	mbq->sbp = (void *)sbp;
2512 	mbq->mbox_cmpl = emlxs_reg_vpi_mbcmpl;
2513 	mbq->context = NULL;
2514 	mbq->port = (void *)port;
2515 
2516 	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
2517 	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
2518 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2519 		    "reg_vpi:%d failed. Unable to send request.",
2520 		    port->vpi);
2521 
2522 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2523 		return (1);
2524 	}
2525 
2526 	return (0);
2527 
2528 } /* emlxs_mb_reg_vpi() */
2529 
2530 
2531 /* SLI3 */
2532 static uint32_t
2533 emlxs_unreg_vpi_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2534 {
2535 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2536 	MAILBOX *mb;
2537 
2538 	mb = (MAILBOX *)mbq->mbox;
2539 
2540 	if (mb->mbxStatus != MBX_SUCCESS) {
2541 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2542 		    "unreg_vpi_mbcmpl:%d failed. status=%x",
2543 		    port->vpi, mb->mbxStatus);
2544 		return (0);
2545 	}
2546 
2547 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2548 	    "unreg_vpi_mbcmpl:%d", port->vpi);
2549 
2550 	mutex_enter(&EMLXS_PORT_LOCK);
2551 	port->flag &= ~EMLXS_PORT_REG_VPI_CMPL;
2552 	mutex_exit(&EMLXS_PORT_LOCK);
2553 
2554 	return (0);
2555 
2556 } /* emlxs_unreg_vpi_mbcmpl() */
2557 
2558 
2559 /* SLI3 */
2560 extern uint32_t
2561 emlxs_mb_unreg_vpi(emlxs_port_t *port)
2562 {
2563 	emlxs_hba_t	*hba = HBA;
2564 	MAILBOXQ	*mbq;
2565 	MAILBOX		*mb;
2566 	int		rval;
2567 
2568 	if (hba->sli_mode > EMLXS_HBA_SLI3_MODE) {
2569 		return (1);
2570 	}
2571 
2572 	mutex_enter(&EMLXS_PORT_LOCK);
2573 
2574 	if (!(port->flag & EMLXS_PORT_REG_VPI) ||
2575 	    !(port->flag & EMLXS_PORT_REG_VPI_CMPL)) {
2576 
2577 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2578 		    "unreg_vpi:%d failed. Not registered. flag=%x",
2579 		    port->vpi, port->flag);
2580 
2581 		mutex_exit(&EMLXS_PORT_LOCK);
2582 		return (0);
2583 	}
2584 
2585 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
2586 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2587 		    "unreg_vpi:%d failed. Unable to allocate mbox.",
2588 		    port->vpi);
2589 
2590 		mutex_exit(&EMLXS_PORT_LOCK);
2591 		return (1);
2592 	}
2593 
2594 	port->flag &= ~EMLXS_PORT_REG_VPI;
2595 
2596 	mutex_exit(&EMLXS_PORT_LOCK);
2597 
2598 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2599 	    "unreg_vpi:%d", port->vpi);
2600 
2601 	mb = (MAILBOX *)mbq->mbox;
2602 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2603 	mb->un.varUnregVpi.vpi = port->vpi;
2604 	mb->mbxCommand = MBX_UNREG_VPI;
2605 	mb->mbxOwner = OWN_HOST;
2606 
2607 	mbq->mbox_cmpl = emlxs_unreg_vpi_mbcmpl;
2608 	mbq->context = NULL;
2609 	mbq->port = (void *)port;
2610 
2611 	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
2612 	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
2613 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2614 		    "unreg_vpi:%d failed. Unable to send request.",
2615 		    port->vpi);
2616 
2617 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2618 		return (1);
2619 	}
2620 
2621 	return (0);
2622 
2623 } /* emlxs_mb_unreg_vpi() */
2624 
2625 
2626 /*
2627  * emlxs_mb_config_farp  Issue a CONFIG FARP mailbox command
2628  */
2629 extern void
2630 emlxs_mb_config_farp(emlxs_hba_t *hba, MAILBOXQ *mbq)
2631 {
2632 	MAILBOX *mb = (MAILBOX *)mbq;
2633 
2634 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2635 
2636 	bcopy((uint8_t *)&hba->wwpn,
2637 	    (uint8_t *)&mb->un.varCfgFarp.portname, sizeof (NAME_TYPE));
2638 
2639 	bcopy((uint8_t *)&hba->wwpn,
2640 	    (uint8_t *)&mb->un.varCfgFarp.nodename, sizeof (NAME_TYPE));
2641 
2642 	mb->un.varCfgFarp.filterEnable = 1;
2643 	mb->un.varCfgFarp.portName = 1;
2644 	mb->un.varCfgFarp.nodeName = 1;
2645 	mb->mbxCommand = MBX_CONFIG_FARP;
2646 	mb->mbxOwner = OWN_HOST;
2647 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
2648 	mbq->port = (void *)&PPORT;
2649 
2650 } /* emlxs_mb_config_farp() */
2651 
2652 
2653 /*
2654  * emlxs_mb_read_config  Issue a READ_CONFIG mailbox command
2655  */
2656 /*ARGSUSED*/
2657 extern void
2658 emlxs_mb_read_config(emlxs_hba_t *hba, MAILBOXQ *mbq)
2659 {
2660 	MAILBOX *mb = (MAILBOX *)mbq;
2661 
2662 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2663 		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
2664 		mbq->nonembed = NULL;
2665 	} else {
2666 		bzero((void *)mb, MAILBOX_CMD_BSIZE);
2667 	}
2668 
2669 	mb->mbxCommand = MBX_READ_CONFIG;
2670 	mb->mbxOwner = OWN_HOST;
2671 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
2672 	mbq->port = (void *)&PPORT;
2673 
2674 } /* emlxs_mb_read_config() */
2675 
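/*
 * Illustrative sketch only: issues READ_CONFIG synchronously so the
 * response can be examined in the returned mailbox. The MBX_WAIT flag
 * is assumed to provide blocking/polled semantics as elsewhere in the
 * driver; the wrapper name and the EMLXS_MBOX_EXAMPLES guard are
 * hypothetical.
 */
#ifdef EMLXS_MBOX_EXAMPLES
static uint32_t
emlxs_example_read_config(emlxs_hba_t *hba)
{
	MAILBOXQ *mbq;
	MAILBOX *mb;
	uint32_t rval;

	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
		return (1);
	}

	emlxs_mb_read_config(hba, mbq);

	/* Block until the command completes or times out */
	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0);

	mb = (MAILBOX *)mbq->mbox;
	if ((rval == MBX_SUCCESS) && (mb->mbxStatus == 0)) {
		/* ... read the READ_CONFIG response fields from mb ... */
	}

	/* In the MBX_WAIT case the caller still owns the MAILBOXQ */
	emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);

	return ((rval == MBX_SUCCESS) ? 0 : 1);
}
#endif	/* EMLXS_MBOX_EXAMPLES */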
2676 
2677 /*
2678  * NAME:     emlxs_mb_put
2679  *
2680  * FUNCTION: put mailbox cmd onto the mailbox queue.
2681  *
2682  * EXECUTION ENVIRONMENT: process and interrupt level.
2683  *
2684  * NOTES:
2685  *
2686  * CALLED FROM: EMLXS_SLI_ISSUE_MBOX_CMD
2687  *
2688  * INPUT: hba           - pointer to the device info area
2689  *      mbq             - pointer to mailbox queue entry of mailbox cmd
2690  *
2691  * RETURNS: none - the command is appended to the mailbox queue
2692  */
2693 extern void
2694 emlxs_mb_put(emlxs_hba_t *hba, MAILBOXQ *mbq)
2695 {
2696 
2697 	mutex_enter(&EMLXS_MBOX_LOCK);
2698 
2699 	if (hba->mbox_queue.q_first) {
2700 
2701 		/*
2702 		 * queue command to end of list
2703 		 */
2704 		((MAILBOXQ *)hba->mbox_queue.q_last)->next = mbq;
2705 		hba->mbox_queue.q_last = (uint8_t *)mbq;
2706 		hba->mbox_queue.q_cnt++;
2707 	} else {
2708 
2709 		/*
2710 		 * add command to empty list
2711 		 */
2712 		hba->mbox_queue.q_first = (uint8_t *)mbq;
2713 		hba->mbox_queue.q_last = (uint8_t *)mbq;
2714 		hba->mbox_queue.q_cnt = 1;
2715 	}
2716 
2717 	mbq->next = NULL;
2718 
2719 	mutex_exit(&EMLXS_MBOX_LOCK);
2720 } /* emlxs_mb_put() */
2721 
2722 
2723 /*
2724  * NAME:     emlxs_mb_get
2725  *
2726  * FUNCTION: get a mailbox command from mailbox command queue
2727  *
2728  * EXECUTION ENVIRONMENT: interrupt level.
2729  *
2730  * NOTES:
2731  *
2732  * CALLED FROM: emlxs_handle_mb_event
2733  *
2734  * INPUT: hba       - pointer to the device info area
2735  *
2736  * RETURNS: NULL - queue empty; otherwise the next queued mailbox command
2737  */
2738 extern MAILBOXQ *
2739 emlxs_mb_get(emlxs_hba_t *hba)
2740 {
2741 	MAILBOXQ	*p_first = NULL;
2742 
2743 	mutex_enter(&EMLXS_MBOX_LOCK);
2744 
2745 	if (hba->mbox_queue.q_first) {
2746 		p_first = (MAILBOXQ *)hba->mbox_queue.q_first;
2747 		hba->mbox_queue.q_first = (uint8_t *)p_first->next;
2748 
2749 		if (hba->mbox_queue.q_first == NULL) {
2750 			hba->mbox_queue.q_last = NULL;
2751 			hba->mbox_queue.q_cnt = 0;
2752 		} else {
2753 			hba->mbox_queue.q_cnt--;
2754 		}
2755 
2756 		p_first->next = NULL;
2757 	}
2758 
2759 	mutex_exit(&EMLXS_MBOX_LOCK);
2760 
2761 	return (p_first);
2762 
2763 } /* emlxs_mb_get() */
2764 
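/*
 * Illustrative sketch only: emlxs_mb_put() and emlxs_mb_get() maintain a
 * simple FIFO protected by EMLXS_MBOX_LOCK, so commands are retrieved in
 * the order they were parked. The wrapper name and the
 * EMLXS_MBOX_EXAMPLES guard are hypothetical.
 */
#ifdef EMLXS_MBOX_EXAMPLES
static void
emlxs_example_mbox_fifo(emlxs_hba_t *hba, MAILBOXQ *mbq1, MAILBOXQ *mbq2)
{
	MAILBOXQ *next;

	/* Park two prebuilt commands on the queue */
	emlxs_mb_put(hba, mbq1);
	emlxs_mb_put(hba, mbq2);

	/* Drain them; they come back in arrival order (mbq1, then mbq2) */
	while ((next = emlxs_mb_get(hba)) != NULL) {
		/* Each entry would normally be issued or returned to */
		/* the MEM_MBOX pool here */
		emlxs_mem_put(hba, MEM_MBOX, (void *)next);
	}
}
#endif	/* EMLXS_MBOX_EXAMPLES */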
2765 
2766 /* EMLXS_PORT_LOCK must be held when calling this */
2767 void
2768 emlxs_mb_init(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t flag, uint32_t tmo)
2769 {
2770 	MATCHMAP	*mp;
2771 
2772 	HBASTATS.MboxIssued++;
2773 	hba->mbox_queue_flag = flag;
2774 
2775 	/* Set the Mailbox timer */
2776 	hba->mbox_timer = hba->timer_tics + tmo;
2777 
2778 	/* Initialize mailbox */
2779 	mbq->flag &= MBQ_INIT_MASK;
2780 	mbq->next = 0;
2781 
2782 	mutex_enter(&EMLXS_MBOX_LOCK);
2783 	hba->mbox_mbq = (void *)mbq;
2784 	mutex_exit(&EMLXS_MBOX_LOCK);
2785 
2786 	if (mbq->nonembed) {
2787 		mp = (MATCHMAP *) mbq->nonembed;
2788 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2789 		    DDI_DMA_SYNC_FORDEV);
2790 	}
2791 
2792 	if (mbq->bp) {
2793 		mp = (MATCHMAP *) mbq->bp;
2794 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2795 		    DDI_DMA_SYNC_FORDEV);
2796 	}
2797 	return;
2798 
2799 } /* emlxs_mb_init() */
2800 
2801 
2802 extern void
2803 emlxs_mb_fini(emlxs_hba_t *hba, MAILBOX *mb, uint32_t mbxStatus)
2804 {
2805 	emlxs_port_t	*port = &PPORT;
2806 	MATCHMAP	*mbox_nonembed;
2807 	MATCHMAP	*mbox_bp;
2808 	emlxs_buf_t	*mbox_sbp;
2809 	fc_unsol_buf_t	*mbox_ubp;
2810 	IOCBQ		*mbox_iocbq;
2811 	MAILBOXQ	*mbox_mbq;
2812 	MAILBOX		*mbox;
2813 	uint32_t	mbox_queue_flag;
2814 
2815 	mutex_enter(&EMLXS_PORT_LOCK);
2816 
2817 	if (hba->mbox_queue_flag) {
2818 		HBASTATS.MboxCompleted++;
2819 
2820 		if (mbxStatus != MBX_SUCCESS) {
2821 			HBASTATS.MboxError++;
2822 		} else {
2823 			HBASTATS.MboxGood++;
2824 		}
2825 	}
2826 
2827 	mutex_enter(&EMLXS_MBOX_LOCK);
2828 	mbox_queue_flag = hba->mbox_queue_flag;
2829 	mbox_mbq = (MAILBOXQ *)hba->mbox_mbq;
2830 
2831 	if (mbox_mbq) {
2832 		mbox_nonembed = (MATCHMAP *)mbox_mbq->nonembed;
2833 		mbox_bp = (MATCHMAP *)mbox_mbq->bp;
2834 		mbox_sbp = (emlxs_buf_t *)mbox_mbq->sbp;
2835 		mbox_ubp = (fc_unsol_buf_t *)mbox_mbq->ubp;
2836 		mbox_iocbq = (IOCBQ *)mbox_mbq->iocbq;
2837 	} else {
2838 		mbox_nonembed = NULL;
2839 		mbox_bp = NULL;
2840 		mbox_sbp = NULL;
2841 		mbox_ubp = NULL;
2842 		mbox_iocbq = NULL;
2843 	}
2844 
2845 	hba->mbox_mbq = NULL;
2846 	hba->mbox_queue_flag = 0;
2847 	hba->mbox_timer = 0;
2848 	mutex_exit(&EMLXS_MBOX_LOCK);
2849 
2850 	mutex_exit(&EMLXS_PORT_LOCK);
2851 
2852 	if (mbox_queue_flag == MBX_NOWAIT) {
2853 		/* Check for deferred MBUF cleanup */
2854 		if (mbox_bp) {
2855 			emlxs_mem_put(hba, MEM_BUF, (void *)mbox_bp);
2856 		}
2857 		if (mbox_nonembed) {
2858 			emlxs_mem_put(hba, MEM_BUF,
2859 			    (void *)mbox_nonembed);
2860 		}
2861 		if (mbox_mbq) {
2862 			emlxs_mem_put(hba, MEM_MBOX,
2863 			    (void *)mbox_mbq);
2864 		}
2865 	} else {  /* MBX_WAIT */
2866 		if (mbox_mbq) {
2867 			if (mb) {
2868 				/* Copy the local mailbox provided back into */
2869 				/* the original mailbox */
2870 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2871 					bcopy((uint32_t *)mb,
2872 					    (uint32_t *)mbox_mbq,
2873 					    MAILBOX_CMD_SLI4_BSIZE);
2874 				} else {
2875 					bcopy((uint32_t *)mb,
2876 					    (uint32_t *)mbox_mbq,
2877 					    MAILBOX_CMD_BSIZE);
2878 				}
2879 			}
2880 
2881 			mbox = (MAILBOX *)mbox_mbq;
2882 			mbox->mbxStatus = (uint16_t)mbxStatus;
2883 
2884 			/* Mark mailbox complete */
2885 			mbox_mbq->flag |= MBQ_COMPLETED;
2886 		}
2887 
2888 		/* Wake up the sleeping thread */
2889 		if (mbox_queue_flag == MBX_SLEEP) {
2890 			mutex_enter(&EMLXS_MBOX_LOCK);
2891 			cv_broadcast(&EMLXS_MBOX_CV);
2892 			mutex_exit(&EMLXS_MBOX_LOCK);
2893 		}
2894 	}
2895 
2896 #ifdef SFCT_SUPPORT
2897 	if (mb && mbox_sbp && mbox_sbp->fct_cmd) {
2898 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_detail_msg,
2899 		    "FCT mailbox: %s: status=%x",
2900 		    emlxs_mb_cmd_xlate(mb->mbxCommand),
2901 		    (uint32_t)mb->mbxStatus);
2902 	}
2903 #endif /* SFCT_SUPPORT */
2904 
2905 	emlxs_mb_deferred_cmpl(port, mbxStatus, mbox_sbp, mbox_ubp, mbox_iocbq);
2906 
2907 	return;
2908 
2909 } /* emlxs_mb_fini() */
2910 
2911 
2912 extern void
2913 emlxs_mb_deferred_cmpl(emlxs_port_t *port, uint32_t mbxStatus, emlxs_buf_t *sbp,
2914     fc_unsol_buf_t *ubp, IOCBQ *iocbq)
2915 {
2916 	emlxs_hba_t *hba = HBA;
2917 	emlxs_ub_priv_t	*ub_priv;
2918 
2919 #ifdef SFCT_SUPPORT
2920 	if ((mbxStatus == MBX_SUCCESS) && sbp && sbp->fct_cmd) {
2921 		emlxs_buf_t *cmd_sbp = sbp;
2922 
2923 		if ((cmd_sbp->fct_state == EMLXS_FCT_REG_PENDING) &&
2924 		    (cmd_sbp->node)) {
2925 
2926 			mutex_enter(&EMLXS_PKT_LOCK);
2927 			cmd_sbp->fct_flags |= EMLXS_FCT_REGISTERED;
2928 			cv_broadcast(&EMLXS_PKT_CV);
2929 			mutex_exit(&EMLXS_PKT_LOCK);
2930 
2931 			sbp = NULL;
2932 		}
2933 	}
2934 #endif /* SFCT_SUPPORT */
2935 
2936 	/* Check for deferred pkt completion */
2937 	if (sbp) {
2938 		if (mbxStatus != MBX_SUCCESS) {
2939 			/* Set error status */
2940 			sbp->pkt_flags &= ~PACKET_STATE_VALID;
2941 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2942 			    IOERR_NO_RESOURCES, 1);
2943 		}
2944 
2945 		emlxs_pkt_complete(sbp, -1, 0, 1);
2946 	}
2947 
2948 	/* Check for deferred ub completion */
2949 	if (ubp) {
2950 		ub_priv = ubp->ub_fca_private;
2951 
2952 		if (mbxStatus == MBX_SUCCESS) {
2953 			emlxs_ub_callback(ub_priv->port, ubp);
2954 		} else {
2955 			(void) emlxs_fca_ub_release(ub_priv->port, 1,
2956 			    &ubp->ub_token);
2957 		}
2958 	}
2959 
2960 	/* Special handling for restricted login */
2961 	if (iocbq == (IOCBQ *)1) {
2962 		iocbq = NULL;
2963 	}
2964 
2965 	/* Check for deferred iocb tx */
2966 	if (iocbq) {
2967 		/* Check for driver special codes */
2968 		/* These indicate the mailbox is being flushed */
2969 		if (mbxStatus >= MBX_DRIVER_RESERVED) {
2970 			/* Set the error status and return it */
2971 			iocbq->iocb.ULPSTATUS = IOSTAT_LOCAL_REJECT;
2972 			iocbq->iocb.un.grsp.perr.statLocalError =
2973 			    IOERR_ABORT_REQUESTED;
2974 
2975 			emlxs_proc_channel_event(hba, iocbq->channel,
2976 			    iocbq);
2977 		} else {
2978 			EMLXS_SLI_ISSUE_IOCB_CMD(hba, iocbq->channel,
2979 			    iocbq);
2980 		}
2981 	}
2982 
2983 	return;
2984 
2985 } /* emlxs_mb_deferred_cmpl() */
2986 
2987 
2988 extern void
2989 emlxs_mb_flush(emlxs_hba_t *hba)
2990 {
2991 	MAILBOXQ	*mbq;
2992 	uint32_t	mbxStatus;
2993 
2994 	mbxStatus = (hba->flag & FC_HARDWARE_ERROR) ?
2995 	    MBX_HARDWARE_ERROR : MBX_NOT_FINISHED;
2996 
2997 	/* Flush out the active mbox command */
2998 	emlxs_mb_fini(hba, NULL, mbxStatus);
2999 
3000 	/* Flush out the queued mbox commands */
3001 	while ((mbq = (MAILBOXQ *)emlxs_mb_get(hba)) != NULL) {
3002 		mutex_enter(&EMLXS_MBOX_LOCK);
3003 		hba->mbox_queue_flag = MBX_NOWAIT;
3004 		hba->mbox_mbq = (void *)mbq;
3005 		mutex_exit(&EMLXS_MBOX_LOCK);
3006 
3007 		emlxs_mb_fini(hba, NULL, mbxStatus);
3008 	}
3009 
3010 	return;
3011 
3012 } /* emlxs_mb_flush() */
3013 
3014 
3015 extern char *
3016 emlxs_mb_cmd_xlate(uint8_t cmd)
3017 {
3018 	static char	buffer[32];
3019 	uint32_t	i;
3020 	uint32_t	count;
3021 
3022 	count = sizeof (emlxs_mb_cmd_table) / sizeof (emlxs_table_t);
3023 	for (i = 0; i < count; i++) {
3024 		if (cmd == emlxs_mb_cmd_table[i].code) {
3025 			return (emlxs_mb_cmd_table[i].string);
3026 		}
3027 	}
3028 
3029 	(void) sprintf(buffer, "Cmd=0x%x", cmd);
3030 	return (buffer);
3031 
3032 } /* emlxs_mb_cmd_xlate() */
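

/*
 * Illustrative sketch only: emlxs_mb_cmd_xlate() maps a mailbox command
 * code to a printable name (or a "Cmd=0x.." fallback), which is handy
 * when logging a completion. The wrapper below uses the generic kernel
 * cmn_err(9F) interface purely for illustration; it and the
 * EMLXS_MBOX_EXAMPLES guard are not part of the driver.
 */
#ifdef EMLXS_MBOX_EXAMPLES
static void
emlxs_example_log_mbox(MAILBOX *mb)
{
	cmn_err(CE_NOTE, "mailbox %s completed, status=0x%x",
	    emlxs_mb_cmd_xlate(mb->mbxCommand), (uint32_t)mb->mbxStatus);
}
#endif	/* EMLXS_MBOX_EXAMPLES */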
3033