xref: /illumos-gate/usr/src/uts/common/io/comstar/port/qlt/qlt.c (revision d67944fbe3fa0b31893a7116a09b0718eecf6078)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/ddi.h>
28 #include <sys/stat.h>
29 #include <sys/pci.h>
30 #include <sys/sunddi.h>
31 #include <sys/modctl.h>
32 #include <sys/file.h>
33 #include <sys/cred.h>
34 #include <sys/byteorder.h>
35 #include <sys/atomic.h>
36 #include <sys/scsi/scsi.h>
37 
38 #include <stmf_defines.h>
39 #include <fct_defines.h>
40 #include <stmf.h>
41 #include <portif.h>
42 #include <fct.h>
43 #include <qlt.h>
44 #include <qlt_dma.h>
45 #include <qlt_ioctl.h>
46 #include <stmf_ioctl.h>
47 
48 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
49 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
50 static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
51     int reset_only);
52 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
53     uint32_t word_count, uint32_t risc_addr);
54 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
55 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
56 					uint32_t dma_size);
57 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
58 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
59 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
60 static fct_status_t qlt_initialize_adapter(fct_local_port_t *port);
61 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
62     stmf_state_change_info_t *ssci);
63 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
64 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
65 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
66 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
67 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
68 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
69 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
70 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
71     uint8_t *rsp);
72 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
73 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
74 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
75 static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
76     int reset_only);
77 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
78     uint32_t word_count, uint32_t risc_addr);
79 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
80 fct_status_t qlt_port_start(caddr_t arg);
81 fct_status_t qlt_port_stop(caddr_t arg);
82 fct_status_t qlt_port_online(qlt_state_t *qlt);
83 fct_status_t qlt_port_offline(qlt_state_t *qlt);
84 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
85     fct_link_info_t *li);
86 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
87 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
88 						fct_flogi_xchg_t *fx);
89 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
90 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
91 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
92     fct_remote_port_t *rp, fct_cmd_t *login);
93 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
94     fct_remote_port_t *rp);
95 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
96 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
97 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
98     fct_cmd_t *cmd, int terminate);
99 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
100 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
101 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
102     fct_cmd_t *cmd, uint32_t flags);
103 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
104 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
105 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
106 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
107 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
108 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
109 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
110     stmf_data_buf_t *dbuf, uint32_t ioflags);
111 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
112 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
113 static void qlt_release_intr(qlt_state_t *qlt);
114 static int qlt_setup_interrupts(qlt_state_t *qlt);
115 static void qlt_destroy_mutex(qlt_state_t *qlt);
116 
117 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
118     uint32_t words);
119 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
120     caddr_t buf, int size_left);
121 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
122     caddr_t buf, int size_left);
123 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
124     int count, int size_left);
125 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
126     cred_t *credp, int *rval);
127 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
128 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
129 
130 #define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] |= \
131 				    ((uint8_t)1) << ((els) & 7)
132 
133 int qlt_enable_msix = 0;
134 
135 /* Array to quickly calculate next free buf index to use */
136 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
137 
138 static struct cb_ops qlt_cb_ops = {
139 	qlt_open,
140 	qlt_close,
141 	nodev,
142 	nodev,
143 	nodev,
144 	nodev,
145 	nodev,
146 	qlt_ioctl,
147 	nodev,
148 	nodev,
149 	nodev,
150 	nochpoll,
151 	ddi_prop_op,
152 	0,
153 	D_MP | D_NEW
154 };
155 
156 static struct dev_ops qlt_ops = {
157 	DEVO_REV,
158 	0,
159 	nodev,
160 	nulldev,
161 	nulldev,
162 	qlt_attach,
163 	qlt_detach,
164 	nodev,
165 	&qlt_cb_ops,
166 	NULL,
167 	ddi_power
168 };
169 
170 #define	QLT_NAME    "COMSTAR QLT"
171 #define	QLT_VERSION "1.0"
172 
173 static struct modldrv modldrv = {
174 	&mod_driverops,
175 	QLT_NAME,
176 	&qlt_ops,
177 };
178 
179 static struct modlinkage modlinkage = {
180 	MODREV_1, &modldrv, NULL
181 };
182 
183 void *qlt_state = NULL;
184 kmutex_t qlt_global_lock;
185 static uint32_t qlt_loaded_counter = 0;
186 
187 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
188 			"-X Mode 1 133", "--Invalid--",
189 			"-X Mode 2 66", "-X Mode 2 100",
190 			"-X Mode 2 133", " 66" };
191 
192 /* Always use 64 bit DMA. */
193 static ddi_dma_attr_t qlt_queue_dma_attr = {
194 	DMA_ATTR_V0,		/* dma_attr_version */
195 	0,			/* low DMA address range */
196 	0xffffffffffffffff,	/* high DMA address range */
197 	0xffffffff,		/* DMA counter register */
198 	64,			/* DMA address alignment */
199 	0xff,			/* DMA burstsizes */
200 	1,			/* min effective DMA size */
201 	0xffffffff,		/* max DMA xfer size */
202 	0xffffffff,		/* segment boundary */
203 	1,			/* s/g list length */
204 	1,			/* granularity of device */
205 	0			/* DMA transfer flags */
206 };
207 
208 /* qlogic logging */
209 int enable_extended_logging = 0;
210 
211 static char qlt_provider_name[] = "qlt";
212 static struct stmf_port_provider *qlt_pp;
213 
214 int
215 _init(void)
216 {
217 	int ret;
218 
219 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
220 	if (ret == 0) {
221 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
222 		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
223 		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
224 		qlt_pp->pp_portif_rev = PORTIF_REV_1;
225 		qlt_pp->pp_name = qlt_provider_name;
226 		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
227 			stmf_free(qlt_pp);
228 			mutex_destroy(&qlt_global_lock);
229 			ddi_soft_state_fini(&qlt_state);
230 			return (EIO);
231 		}
232 		ret = mod_install(&modlinkage);
233 		if (ret != 0) {
234 			(void) stmf_deregister_port_provider(qlt_pp);
235 			stmf_free(qlt_pp);
236 			mutex_destroy(&qlt_global_lock);
237 			ddi_soft_state_fini(&qlt_state);
238 		}
239 	}
240 	return (ret);
241 }
242 
243 int
244 _fini(void)
245 {
246 	int ret;
247 
248 	if (qlt_loaded_counter)
249 		return (EBUSY);
250 	ret = mod_remove(&modlinkage);
251 	if (ret == 0) {
252 		(void) stmf_deregister_port_provider(qlt_pp);
253 		stmf_free(qlt_pp);
254 		mutex_destroy(&qlt_global_lock);
255 		ddi_soft_state_fini(&qlt_state);
256 	}
257 	return (ret);
258 }
259 
260 int
261 _info(struct modinfo *modinfop)
262 {
263 	return (mod_info(&modlinkage, modinfop));
264 }
265 
266 int
267 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
268 {
269 	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
270 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
271 }
272 
273 static int
274 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
275 {
276 	int		instance;
277 	qlt_state_t	*qlt;
278 	ddi_device_acc_attr_t	dev_acc_attr;
279 	uint16_t	did;
280 	uint16_t	val;
281 	uint16_t	mr;
282 	size_t		discard;
283 	uint_t		ncookies;
284 	int		max_read_size;
285 	int		max_payload_size;
286 	fct_status_t	ret;
287 
288 	/* No support for suspend resume yet */
289 	if (cmd != DDI_ATTACH)
290 		return (DDI_FAILURE);
291 	instance = ddi_get_instance(dip);
292 
293 	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
294 		return (DDI_FAILURE);
295 	}
296 
297 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance))
298 	    == NULL) {
299 		goto attach_fail_1;
300 	}
301 	qlt->instance = instance;
302 	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
303 	qlt->dip = dip;
304 	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
305 		goto attach_fail_2;
306 	}
307 	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
308 	if ((did != 0x2422) && (did != 0x2432) &&
309 	    (did != 0x2522) && (did != 0x2532)) {
310 		cmn_err(CE_WARN, "qlt(%d): unknwon devid(%x), failing attach",
311 		    instance, did);
312 		goto attach_fail_4;
313 	}
314 	if ((did & 0xFF00) == 0x2500)
315 		qlt->qlt_25xx_chip = 1;
316 
317 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
318 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
319 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
320 	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
321 	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
322 		goto attach_fail_4;
323 	}
324 	if (did == 0x2422) {
325 		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
326 		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
327 		pci_bits >>= 8;
328 		pci_bits &= 0xf;
329 		if ((pci_bits == 3) || (pci_bits == 7)) {
330 			cmn_err(CE_NOTE,
331 			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
332 			    instance, pci_speeds[pci_bits], pci_bits);
333 		} else {
334 			cmn_err(CE_WARN,
335 			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
336 			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
337 			    "(Invalid)", ((pci_bits == 0) ||
338 			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
339 			    "32 bit slot ") : "", pci_bits);
340 		}
341 	}
342 	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
343 		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
344 		    (unsigned long long)ret);
345 		goto attach_fail_5;
346 	}
347 
348 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
349 	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
350 		goto attach_fail_5;
351 	}
352 	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
353 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
354 	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
355 	    DDI_SUCCESS) {
356 		goto attach_fail_6;
357 	}
358 	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
359 	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
360 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
361 	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
362 		goto attach_fail_7;
363 	}
364 	if (ncookies != 1)
365 		goto attach_fail_8;
366 	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
367 	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
368 	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
369 	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
370 
371 	/* mutex are inited in this function */
372 	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
373 		goto attach_fail_8;
374 
375 	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
376 	    "qlt%d", instance);
377 	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
378 	    "%s,0", qlt->qlt_minor_name);
379 
380 	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
381 	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
382 		goto attach_fail_9;
383 	}
384 
385 	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
386 	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
387 	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
388 
389 	/* Setup PCI cfg space registers */
390 	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
391 	if (max_read_size == 11)
392 		goto over_max_read_xfer_setting;
393 	if (did == 0x2422) {
394 		if (max_read_size == 512)
395 			val = 0;
396 		else if (max_read_size == 1024)
397 			val = 1;
398 		else if (max_read_size == 2048)
399 			val = 2;
400 		else if (max_read_size == 4096)
401 			val = 3;
402 		else {
403 			cmn_err(CE_WARN, "qlt(%d) malformed "
404 			    "pci-max-read-request in qlt.conf. Valid values "
405 			    "for this HBA are 512/1024/2048/4096", instance);
406 			goto over_max_read_xfer_setting;
407 		}
408 		mr = PCICFG_RD16(qlt, 0x4E);
409 		mr &= 0xfff3;
410 		mr |= (val << 2);
411 		PCICFG_WR16(qlt, 0x4E, mr);
412 	} else if ((did == 0x2432) || (did == 0x2532)) {
413 		if (max_read_size == 128)
414 			val = 0;
415 		else if (max_read_size == 256)
416 			val = 1;
417 		else if (max_read_size == 512)
418 			val = 2;
419 		else if (max_read_size == 1024)
420 			val = 3;
421 		else if (max_read_size == 2048)
422 			val = 4;
423 		else if (max_read_size == 4096)
424 			val = 5;
425 		else {
426 			cmn_err(CE_WARN, "qlt(%d) malformed "
427 			    "pci-max-read-request in qlt.conf. Valid values "
428 			    "for this HBA are 128/256/512/1024/2048/4096",
429 			    instance);
430 			goto over_max_read_xfer_setting;
431 		}
432 		mr = PCICFG_RD16(qlt, 0x54);
433 		mr &= 0x8fff;
434 		mr |= (val << 12);
435 		PCICFG_WR16(qlt, 0x54, mr);
436 	} else {
437 		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
438 		    "pci-max-read-request for this device (%x)",
439 		    instance, did);
440 	}
441 over_max_read_xfer_setting:;
442 
443 	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
444 	if (max_payload_size == 11)
445 		goto over_max_payload_setting;
446 	if ((did == 0x2432) || (did == 0x2532)) {
447 		if (max_payload_size == 128)
448 			val = 0;
449 		else if (max_payload_size == 256)
450 			val = 1;
451 		else if (max_payload_size == 512)
452 			val = 2;
453 		else if (max_payload_size == 1024)
454 			val = 3;
455 		else {
456 			cmn_err(CE_WARN, "qlt(%d) malformed "
457 			    "pcie-max-payload-size in qlt.conf. Valid values "
458 			    "for this HBA are 128/256/512/1024",
459 			    instance);
460 			goto over_max_payload_setting;
461 		}
462 		mr = PCICFG_RD16(qlt, 0x54);
463 		mr &= 0xff1f;
464 		mr |= (val << 5);
465 		PCICFG_WR16(qlt, 0x54, mr);
466 	} else {
467 		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
468 		    "pcie-max-payload-size for this device (%x)",
469 		    instance, did);
470 	}
471 
472 over_max_payload_setting:;
473 
474 	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
475 		goto attach_fail_10;
476 
477 	ddi_report_dev(dip);
478 	return (DDI_SUCCESS);
479 
480 attach_fail_10:;
481 	mutex_destroy(&qlt->qlt_ioctl_lock);
482 	cv_destroy(&qlt->mbox_cv);
483 	cv_destroy(&qlt->rp_dereg_cv);
484 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
485 attach_fail_9:;
486 	qlt_destroy_mutex(qlt);
487 	qlt_release_intr(qlt);
488 attach_fail_8:;
489 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
490 attach_fail_7:;
491 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
492 attach_fail_6:;
493 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
494 attach_fail_5:;
495 	ddi_regs_map_free(&qlt->regs_acc_handle);
496 attach_fail_4:;
497 	pci_config_teardown(&qlt->pcicfg_acc_handle);
498 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
499 attach_fail_2:;
500 attach_fail_1:;
501 	ddi_soft_state_free(qlt_state, instance);
502 	return (DDI_FAILURE);
503 }
504 
505 #define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
506 
507 /* ARGSUSED */
508 static int
509 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
510 {
511 	qlt_state_t *qlt;
512 
513 	int instance;
514 
515 	instance = ddi_get_instance(dip);
516 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance))
517 	    == NULL) {
518 		return (DDI_FAILURE);
519 	}
520 
521 	if (qlt->fw_code01) {
522 		return (DDI_FAILURE);
523 	}
524 
525 	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
526 	    qlt->qlt_state_not_acked) {
527 		return (DDI_FAILURE);
528 	}
529 	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS)
530 		return (DDI_FAILURE);
531 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
532 	qlt_destroy_mutex(qlt);
533 	qlt_release_intr(qlt);
534 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
535 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
536 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
537 	ddi_regs_map_free(&qlt->regs_acc_handle);
538 	pci_config_teardown(&qlt->pcicfg_acc_handle);
539 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
540 	cv_destroy(&qlt->mbox_cv);
541 	cv_destroy(&qlt->rp_dereg_cv);
542 	ddi_soft_state_free(qlt_state, instance);
543 
544 	return (DDI_SUCCESS);
545 }
546 
547 static void
548 qlt_enable_intr(qlt_state_t *qlt)
549 {
550 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
551 		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
552 	} else {
553 		int i;
554 		for (i = 0; i < qlt->intr_cnt; i++)
555 			(void) ddi_intr_enable(qlt->htable[i]);
556 	}
557 }
558 
559 static void
560 qlt_disable_intr(qlt_state_t *qlt)
561 {
562 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
563 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
564 	} else {
565 		int i;
566 		for (i = 0; i < qlt->intr_cnt; i++)
567 			(void) ddi_intr_disable(qlt->htable[i]);
568 	}
569 }
570 
571 static void
572 qlt_release_intr(qlt_state_t *qlt)
573 {
574 	if (qlt->htable) {
575 		int i;
576 		for (i = 0; i < qlt->intr_cnt; i++) {
577 			(void) ddi_intr_remove_handler(qlt->htable[i]);
578 			(void) ddi_intr_free(qlt->htable[i]);
579 		}
580 		kmem_free(qlt->htable, qlt->intr_size);
581 	}
582 	qlt->htable = NULL;
583 	qlt->intr_pri = 0;
584 	qlt->intr_cnt = 0;
585 	qlt->intr_size = 0;
586 	qlt->intr_cap = 0;
587 }
588 
589 
590 static void
591 qlt_init_mutex(qlt_state_t *qlt)
592 {
593 	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
594 	    INT2PTR(qlt->intr_pri, void *));
595 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
596 	    INT2PTR(qlt->intr_pri, void *));
597 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
598 	    INT2PTR(qlt->intr_pri, void *));
599 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
600 	    INT2PTR(qlt->intr_pri, void *));
601 }
602 
603 static void
604 qlt_destroy_mutex(qlt_state_t *qlt)
605 {
606 	mutex_destroy(&qlt->req_lock);
607 	mutex_destroy(&qlt->preq_lock);
608 	mutex_destroy(&qlt->mbox_lock);
609 	mutex_destroy(&qlt->intr_lock);
610 }
611 
612 
613 static int
614 qlt_setup_msix(qlt_state_t *qlt)
615 {
616 	int count, avail, actual;
617 	int ret;
618 	int itype = DDI_INTR_TYPE_MSIX;
619 	int i;
620 
621 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
622 	if (ret != DDI_SUCCESS || count == 0) {
623 		return (DDI_FAILURE);
624 	}
625 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
626 	if (ret != DDI_SUCCESS || avail == 0) {
627 		return (DDI_FAILURE);
628 	}
629 	if (avail < count) {
630 		stmf_trace(qlt->qlt_port_alias,
631 		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
632 	}
633 
634 	qlt->intr_size = count * sizeof (ddi_intr_handle_t);
635 	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
636 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
637 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
638 	/* we need at least 2 interrupt vectors */
639 	if (ret != DDI_SUCCESS || actual < 2) {
640 		ret = DDI_FAILURE;
641 		goto release_intr;
642 	}
643 	if (actual < count) {
644 		QLT_LOG(qlt->qlt_port_alias, "qlt_setup_msix: "
645 		    "requested: %d, received: %d\n",
646 		    count, actual);
647 	}
648 
649 	qlt->intr_cnt = actual;
650 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
651 	if (ret != DDI_SUCCESS) {
652 		ret = DDI_FAILURE;
653 		goto release_intr;
654 	}
655 	qlt_init_mutex(qlt);
656 	for (i = 0; i < actual; i++) {
657 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
658 		    qlt, INT2PTR(i, void *));
659 		if (ret != DDI_SUCCESS)
660 			goto release_mutex;
661 	}
662 
663 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
664 	qlt->intr_flags |= QLT_INTR_MSIX;
665 	return (DDI_SUCCESS);
666 
667 release_mutex:
668 	qlt_destroy_mutex(qlt);
669 release_intr:
670 	for (i = 0; i < actual; i++)
671 		(void) ddi_intr_free(qlt->htable[i]);
672 free_mem:
673 	kmem_free(qlt->htable, qlt->intr_size);
674 	qlt->htable = NULL;
675 	qlt_release_intr(qlt);
676 	return (ret);
677 }
678 
679 
680 static int
681 qlt_setup_msi(qlt_state_t *qlt)
682 {
683 	int count, avail, actual;
684 	int itype = DDI_INTR_TYPE_MSI;
685 	int ret;
686 	int i;
687 
688 	/* get the # of interrupts */
689 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
690 	if (ret != DDI_SUCCESS || count == 0) {
691 		return (DDI_FAILURE);
692 	}
693 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
694 	if (ret != DDI_SUCCESS || avail == 0) {
695 		return (DDI_FAILURE);
696 	}
697 	if (avail < count) {
698 		QLT_LOG(qlt->qlt_port_alias,
699 		    "qlt_setup_msi: nintrs=%d, avail=%d", count, avail);
700 	}
701 	/* MSI requires only 1 interrupt. */
702 	count = 1;
703 
704 	/* allocate interrupt */
705 	qlt->intr_size = count * sizeof (ddi_intr_handle_t);
706 	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
707 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
708 	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
709 	if (ret != DDI_SUCCESS || actual == 0) {
710 		ret = DDI_FAILURE;
711 		goto free_mem;
712 	}
713 	if (actual < count) {
714 		QLT_LOG(qlt->qlt_port_alias, "qlt_setup_msi: "
715 		    "requested: %d, received:%d",
716 		    count, actual);
717 	}
718 	qlt->intr_cnt = actual;
719 
720 	/*
721 	 * Get priority for first msi, assume remaining are all the same.
722 	 */
723 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
724 	if (ret != DDI_SUCCESS) {
725 		ret = DDI_FAILURE;
726 		goto release_intr;
727 	}
728 	qlt_init_mutex(qlt);
729 
730 	/* add handler */
731 	for (i = 0; i < actual; i++) {
732 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
733 		    qlt, INT2PTR(i, void *));
734 		if (ret != DDI_SUCCESS)
735 			goto release_mutex;
736 	}
737 
738 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
739 	qlt->intr_flags |= QLT_INTR_MSI;
740 	return (DDI_SUCCESS);
741 
742 release_mutex:
743 	qlt_destroy_mutex(qlt);
744 release_intr:
745 	for (i = 0; i < actual; i++)
746 		(void) ddi_intr_free(qlt->htable[i]);
747 free_mem:
748 	kmem_free(qlt->htable, qlt->intr_size);
749 	qlt->htable = NULL;
750 	qlt_release_intr(qlt);
751 	return (ret);
752 }
753 
754 static int
755 qlt_setup_fixed(qlt_state_t *qlt)
756 {
757 	int count;
758 	int actual;
759 	int ret;
760 	int itype = DDI_INTR_TYPE_FIXED;
761 
762 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
763 	/* Fixed interrupts can only have one interrupt. */
764 	if (ret != DDI_SUCCESS || count != 1) {
765 		return (DDI_FAILURE);
766 	}
767 
768 	qlt->intr_size = sizeof (ddi_intr_handle_t);
769 	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
770 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
771 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
772 	if (ret != DDI_SUCCESS || actual != 1) {
773 		ret = DDI_FAILURE;
774 		goto free_mem;
775 	}
776 
777 	qlt->intr_cnt = actual;
778 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
779 	if (ret != DDI_SUCCESS) {
780 		ret = DDI_FAILURE;
781 		goto release_intr;
782 	}
783 	qlt_init_mutex(qlt);
784 	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
785 	if (ret != DDI_SUCCESS)
786 		goto release_mutex;
787 
788 	qlt->intr_flags |= QLT_INTR_FIXED;
789 	return (DDI_SUCCESS);
790 
791 release_mutex:
792 	qlt_destroy_mutex(qlt);
793 release_intr:
794 	(void) ddi_intr_free(qlt->htable[0]);
795 free_mem:
796 	kmem_free(qlt->htable, qlt->intr_size);
797 	qlt->htable = NULL;
798 	qlt_release_intr(qlt);
799 	return (ret);
800 }
801 
802 
803 static int
804 qlt_setup_interrupts(qlt_state_t *qlt)
805 {
806 #if defined(__sparc)
807 	int itypes = 0;
808 #endif
809 
810 /*
811  * x86 has a bug in the ddi_intr_block_enable/disable area (6562198). So use
812  * MSI for sparc only for now.
813  */
814 #if defined(__sparc)
815 	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
816 		itypes = DDI_INTR_TYPE_FIXED;
817 	}
818 
819 	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
820 		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
821 			return (DDI_SUCCESS);
822 	}
823 	if (itypes & DDI_INTR_TYPE_MSI) {
824 		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
825 			return (DDI_SUCCESS);
826 	}
827 #endif
828 	return (qlt_setup_fixed(qlt));
829 }
830 
831 /*
832  * Filling the hba attributes
833  */
834 void
835 qlt_populate_hba_fru_details(struct fct_local_port *port,
836     struct fct_port_attrs *port_attrs)
837 {
838 	caddr_t	bufp;
839 	int len;
840 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
841 
842 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
843 	    "QLogic Corp.");
844 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
845 	    "%s", QLT_NAME);
846 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
847 	    "%s", QLT_VERSION);
848 	port_attrs->serial_number[0] = '\0';
849 	port_attrs->hardware_version[0] = '\0';
850 
851 	(void) snprintf(port_attrs->firmware_version,
852 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
853 	    qlt->fw_minor, qlt->fw_subminor);
854 
855 	/* Get FCode version */
856 	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
857 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
858 	    (int *)&len) == DDI_PROP_SUCCESS) {
859 		(void) snprintf(port_attrs->option_rom_version,
860 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
861 		kmem_free(bufp, len);
862 		bufp = NULL;
863 	} else {
864 #ifdef __sparc
865 #define	FCHBA_OPTION_ROM_ERR_TEXT	"No Fcode found"
866 #else
867 #define	FCHBA_OPTION_ROM_ERR_TEXT	"N/A"
868 #endif
869 		(void) snprintf(port_attrs->option_rom_version,
870 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s",
871 		    FCHBA_OPTION_ROM_ERR_TEXT);
872 	}
873 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
874 	    qlt->nvram->subsystem_vendor_id[1] << 8;
875 
876 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
877 	    qlt->nvram->max_frame_length[0];
878 
879 	port_attrs->supported_cos = 0x10000000;
880 	port_attrs->supported_speed = PORT_SPEED_1G |
881 	    PORT_SPEED_2G | PORT_SPEED_4G;
882 	if (qlt->qlt_25xx_chip)
883 		port_attrs->supported_speed |= PORT_SPEED_8G;
884 
885 	(void) snprintf(port_attrs->model, FCHBA_MODEL_LEN, "%s",
886 	    qlt->nvram->model_name);
887 	(void) snprintf(port_attrs->model_description,
888 	    FCHBA_MODEL_DESCRIPTION_LEN, "%s", qlt->nvram->model_name);
889 }
890 
891 fct_status_t
892 qlt_port_start(caddr_t arg)
893 {
894 	qlt_state_t *qlt = (qlt_state_t *)arg;
895 	fct_local_port_t *port;
896 	fct_dbuf_store_t *fds;
897 
898 	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
899 		return (FCT_FAILURE);
900 	}
901 	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
902 	if (port == NULL) {
903 		goto qlt_pstart_fail_1;
904 	}
905 	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
906 	if (fds == NULL) {
907 		goto qlt_pstart_fail_2;
908 	}
909 	qlt->qlt_port = port;
910 	fds->fds_alloc_data_buf = qlt_dmem_alloc;
911 	fds->fds_free_data_buf = qlt_dmem_free;
912 	fds->fds_fca_private = (void *)qlt;
913 	/*
914 	 * Since we keep everything in the state struct and dont allocate any
915 	 * port private area, just use that pointer to point to the
916 	 * state struct.
917 	 */
918 	port->port_fca_private = qlt;
919 	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
920 	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
921 	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
922 	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
923 	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
924 	port->port_default_alias = qlt->qlt_port_alias;
925 	port->port_pp = qlt_pp;
926 	port->port_fds = fds;
927 	port->port_max_logins = QLT_MAX_LOGINS;
928 	port->port_max_xchges = QLT_MAX_XCHGES;
929 	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
930 	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
931 	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
932 	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
933 	port->port_get_link_info = qlt_get_link_info;
934 	port->port_register_remote_port = qlt_register_remote_port;
935 	port->port_deregister_remote_port = qlt_deregister_remote_port;
936 	port->port_send_cmd = qlt_send_cmd;
937 	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
938 	port->port_send_cmd_response = qlt_send_cmd_response;
939 	port->port_abort_cmd = qlt_abort_cmd;
940 	port->port_ctl = qlt_ctl;
941 	port->port_flogi_xchg = qlt_do_flogi;
942 	port->port_populate_hba_details = qlt_populate_hba_fru_details;
943 
944 	if (fct_register_local_port(port) != FCT_SUCCESS) {
945 		goto qlt_pstart_fail_2_5;
946 	}
947 
948 	return (QLT_SUCCESS);
949 
950 qlt_pstart_fail_3:
951 	(void) fct_deregister_local_port(port);
952 qlt_pstart_fail_2_5:
953 	fct_free(fds);
954 qlt_pstart_fail_2:
955 	fct_free(port);
956 	qlt->qlt_port = NULL;
957 qlt_pstart_fail_1:
958 	qlt_dmem_fini(qlt);
959 	return (QLT_FAILURE);
960 }
961 
962 fct_status_t
963 qlt_port_stop(caddr_t arg)
964 {
965 	qlt_state_t *qlt = (qlt_state_t *)arg;
966 
967 	if (fct_deregister_local_port(qlt->qlt_port) != FCT_SUCCESS)
968 		return (QLT_FAILURE);
969 	fct_free(qlt->qlt_port->port_fds);
970 	fct_free(qlt->qlt_port);
971 	qlt->qlt_port = NULL;
972 	qlt_dmem_fini(qlt);
973 	return (QLT_SUCCESS);
974 }
975 
976 /*
977  * Called by framework to init the HBA.
978  * Can be called in the middle of I/O. (Why ??)
979  * Should make sure sane state both before and after the initialization
980  */
981 fct_status_t
982 qlt_port_online(qlt_state_t *qlt)
983 {
984 	uint64_t	da;
985 	int		instance;
986 	fct_status_t	ret;
987 	uint16_t	rcount;
988 	caddr_t		icb;
989 	mbox_cmd_t	*mcp;
990 	uint8_t		*elsbmp;
991 
992 	instance = ddi_get_instance(qlt->dip);
993 
994 	/* XXX Make sure a sane state */
995 
996 	if ((ret = qlt_reset_chip_and_download_fw(qlt, 0)) != QLT_SUCCESS) {
997 		cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
998 		return (ret);
999 	}
1000 
1001 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1002 
1003 	/* Get resource count */
1004 	REG_WR16(qlt, REG_MBOX(0), 0x42);
1005 	ret = qlt_raw_mailbox_command(qlt);
1006 	rcount = REG_RD16(qlt, REG_MBOX(3));
1007 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1008 	if (ret != QLT_SUCCESS)
1009 		return (ret);
1010 
1011 	/* Enable PUREX */
1012 	REG_WR16(qlt, REG_MBOX(0), 0x38);
1013 	REG_WR16(qlt, REG_MBOX(1), 0x0400);
1014 	REG_WR16(qlt, REG_MBOX(2), 0x0);
1015 	REG_WR16(qlt, REG_MBOX(3), 0x0);
1016 	ret = qlt_raw_mailbox_command(qlt);
1017 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1018 	if (ret != QLT_SUCCESS) {
1019 		cmn_err(CE_NOTE, "Enable PUREX failed");
1020 		return (ret);
1021 	}
1022 
1023 	/* Pass ELS bitmap to fw */
1024 	REG_WR16(qlt, REG_MBOX(0), 0x59);
1025 	REG_WR16(qlt, REG_MBOX(1), 0x0500);
1026 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1027 	bzero(elsbmp, 32);
1028 	da = qlt->queue_mem_cookie.dmac_laddress;
1029 	da += MBOX_DMA_MEM_OFFSET;
1030 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
1031 	da >>= 16;
1032 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
1033 	da >>= 16;
1034 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
1035 	da >>= 16;
1036 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
1037 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
1038 	SETELSBIT(elsbmp, ELS_OP_LOGO);
1039 	SETELSBIT(elsbmp, ELS_OP_ABTX);
1040 	SETELSBIT(elsbmp, ELS_OP_ECHO);
1041 	SETELSBIT(elsbmp, ELS_OP_PRLI);
1042 	SETELSBIT(elsbmp, ELS_OP_PRLO);
1043 	SETELSBIT(elsbmp, ELS_OP_SCN);
1044 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
1045 	SETELSBIT(elsbmp, ELS_OP_PDISC);
1046 	SETELSBIT(elsbmp, ELS_OP_ADISC);
1047 	SETELSBIT(elsbmp, ELS_OP_RSCN);
1048 	SETELSBIT(elsbmp, ELS_OP_RNID);
1049 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1050 	    DDI_DMA_SYNC_FORDEV);
1051 	ret = qlt_raw_mailbox_command(qlt);
1052 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1053 	if (ret != QLT_SUCCESS) {
1054 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1055 		    "elsbmp0=%x elabmp1=%x", (long long)ret, elsbmp[0],
1056 		    elsbmp[1]);
1057 		return (ret);
1058 	}
1059 
1060 	/* Init queue pointers */
1061 	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1062 	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1063 	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1064 	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1065 	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1066 	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1067 	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1068 	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1069 	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1070 	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1071 	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1072 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1073 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1074 
1075 	/*
1076 	 * XXX support for tunables. Also should we cache icb ?
1077 	 */
1078 	mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1079 	if (mcp == NULL) {
1080 		return (STMF_ALLOC_FAILURE);
1081 	}
1082 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1083 	bzero(icb, 0x80);
1084 	da = qlt->queue_mem_cookie.dmac_laddress;
1085 	DMEM_WR16(qlt, icb, 1);		/* Version */
1086 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
1087 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
1088 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
1089 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
1090 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1091 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1092 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
1093 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1094 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1095 	DMEM_WR16(qlt, icb+0x28, 100);	/* ms of NOS/OLS for Link down */
1096 	DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1097 	DMEM_WR64(qlt, icb+0x2c, da+REQUEST_QUEUE_OFFSET);
1098 	DMEM_WR64(qlt, icb+0x34, da+RESPONSE_QUEUE_OFFSET);
1099 	DMEM_WR64(qlt, icb+0x3c, da+PRIORITY_QUEUE_OFFSET);
1100 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1101 	DMEM_WR64(qlt, icb+0x50, da+ATIO_QUEUE_OFFSET);
1102 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
1103 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
1104 	DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1105 	    BIT_2 | BIT_1 | BIT_0);
1106 	DMEM_WR32(qlt, icb+0x60, BIT_5);
1107 	DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 | BIT_4);
1108 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1109 	mcp->to_fw[0] = 0x60;
1110 
1111 	/*
1112 	 * This is the 1st command adter adapter initialize which will
1113 	 * use interrupts and regular mailbox interface.
1114 	 */
1115 	qlt->mbox_io_state = MBOX_STATE_READY;
1116 	qlt_enable_intr(qlt);
1117 	qlt->qlt_intr_enabled = 1;
1118 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1119 	/* Issue mailbox to firmware */
1120 	ret = qlt_mailbox_command(qlt, mcp);
1121 	if (ret != QLT_SUCCESS) {
1122 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1123 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1124 	}
1125 
1126 	mcp->to_fw_mask = BIT_0;
1127 	mcp->from_fw_mask = BIT_0 | BIT_1;
1128 	mcp->to_fw[0] = 0x28;
1129 	ret = qlt_mailbox_command(qlt, mcp);
1130 	if (ret != QLT_SUCCESS) {
1131 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1132 		    (long long)ret);
1133 	}
1134 
1135 	qlt_free_mailbox_command(qlt, mcp);
1136 	if (ret != QLT_SUCCESS)
1137 		return (ret);
1138 	return (FCT_SUCCESS);
1139 }
1140 
1141 fct_status_t
1142 qlt_port_offline(qlt_state_t *qlt)
1143 {
1144 	int		retries;
1145 
1146 	mutex_enter(&qlt->mbox_lock);
1147 
1148 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1149 		mutex_exit(&qlt->mbox_lock);
1150 		goto poff_mbox_done;
1151 	}
1152 
1153 	/* Wait to grab the mailboxes */
1154 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1155 	    retries++) {
1156 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1157 		if ((retries > 5) ||
1158 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1159 			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1160 			mutex_exit(&qlt->mbox_lock);
1161 			goto poff_mbox_done;
1162 		}
1163 	}
1164 	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1165 	mutex_exit(&qlt->mbox_lock);
1166 poff_mbox_done:;
1167 	qlt->intr_sneak_counter = 10;
1168 	qlt_disable_intr(qlt);
1169 	mutex_enter(&qlt->intr_lock);
1170 	qlt->qlt_intr_enabled = 0;
1171 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
1172 	drv_usecwait(20);
1173 	qlt->intr_sneak_counter = 0;
1174 	mutex_exit(&qlt->intr_lock);
1175 
1176 	return (FCT_SUCCESS);
1177 }
1178 
1179 static fct_status_t
1180 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1181 {
1182 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1183 	mbox_cmd_t *mcp;
1184 	fct_status_t fc_ret;
1185 	fct_status_t ret;
1186 	clock_t et;
1187 
1188 	et = ddi_get_lbolt() + drv_usectohz(5000000);
1189 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1190 link_info_retry:
1191 	mcp->to_fw[0] = 0x20;
1192 	mcp->to_fw_mask |= BIT_0;
1193 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1194 	/* Issue mailbox to firmware */
1195 	ret = qlt_mailbox_command(qlt, mcp);
1196 	if (ret != QLT_SUCCESS) {
1197 		if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1198 			/* Firmware is not ready */
1199 			if (ddi_get_lbolt() < et) {
1200 				delay(drv_usectohz(50000));
1201 				goto link_info_retry;
1202 			}
1203 		}
1204 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1205 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1206 		fc_ret = FCT_FAILURE;
1207 	} else {
1208 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
1209 		    (((uint32_t)(mcp->from_fw[3])) << 16);
1210 
1211 		li->port_speed = qlt->link_speed;
1212 		switch (mcp->from_fw[6]) {
1213 		case 1:
1214 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1215 			li->port_fca_flogi_done = 1;
1216 			break;
1217 		case 0:
1218 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1219 			li->port_no_fct_flogi = 1;
1220 			break;
1221 		case 3:
1222 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1223 			li->port_fca_flogi_done = 1;
1224 			break;
1225 		case 2: /*FALLTHROUGH*/
1226 		case 4:
1227 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1228 			li->port_fca_flogi_done = 1;
1229 			break;
1230 		default:
1231 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1232 			QLT_LOG(qlt->qlt_port_alias, "Unknown link speed "
1233 			    "reported by fw %x", mcp->from_fw[6]);
1234 		}
1235 		qlt->cur_topology = li->port_topology;
1236 		fc_ret = FCT_SUCCESS;
1237 	}
1238 	qlt_free_mailbox_command(qlt, mcp);
1239 
1240 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1241 		mcp = qlt_alloc_mailbox_command(qlt, 64);
1242 		mcp->to_fw[0] = 0x64;
1243 		mcp->to_fw[1] = 0x7FE;
1244 		mcp->to_fw[10] = 0;
1245 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_10;
1246 		fc_ret = qlt_mailbox_command(qlt, mcp);
1247 		if (fc_ret != QLT_SUCCESS) {
1248 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1249 			    "database for F_port failed, ret = %llx", fc_ret);
1250 		} else {
1251 			uint8_t *p;
1252 
1253 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1254 			p = mcp->dbuf->db_sglist[0].seg_addr;
1255 			bcopy(p + 0x18, li->port_rpwwn, 8);
1256 			bcopy(p + 0x20, li->port_rnwwn, 8);
1257 		}
1258 		qlt_free_mailbox_command(qlt, mcp);
1259 	}
1260 	return (fc_ret);
1261 }
1262 
1263 static int
1264 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1265 {
1266 	int		instance;
1267 	qlt_state_t	*qlt;
1268 
1269 	if (otype != OTYP_CHR) {
1270 		return (EINVAL);
1271 	}
1272 
1273 	/*
1274 	 * Since this is for debugging only, only allow root to issue ioctl now
1275 	 */
1276 	if (drv_priv(credp)) {
1277 		return (EPERM);
1278 	}
1279 
1280 	instance = (int)getminor(*devp);
1281 	qlt = ddi_get_soft_state(qlt_state, instance);
1282 	if (qlt == NULL) {
1283 		return (ENXIO);
1284 	}
1285 
1286 	mutex_enter(&qlt->qlt_ioctl_lock);
1287 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1288 		/*
1289 		 * It is already open for exclusive access.
1290 		 * So shut the door on this caller.
1291 		 */
1292 		mutex_exit(&qlt->qlt_ioctl_lock);
1293 		return (EBUSY);
1294 	}
1295 
1296 	if (flag & FEXCL) {
1297 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1298 			/*
1299 			 * Exclusive operation not possible
1300 			 * as it is already opened
1301 			 */
1302 			mutex_exit(&qlt->qlt_ioctl_lock);
1303 			return (EBUSY);
1304 		}
1305 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1306 	}
1307 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1308 	mutex_exit(&qlt->qlt_ioctl_lock);
1309 
1310 	return (0);
1311 }
1312 
1313 /* ARGSUSED */
1314 static int
1315 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1316 {
1317 	int		instance;
1318 	qlt_state_t	*qlt;
1319 
1320 	if (otype != OTYP_CHR) {
1321 		return (EINVAL);
1322 	}
1323 
1324 	instance = (int)getminor(dev);
1325 	qlt = ddi_get_soft_state(qlt_state, instance);
1326 	if (qlt == NULL) {
1327 		return (ENXIO);
1328 	}
1329 
1330 	mutex_enter(&qlt->qlt_ioctl_lock);
1331 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1332 		mutex_exit(&qlt->qlt_ioctl_lock);
1333 		return (ENODEV);
1334 	}
1335 
1336 	/*
1337 	 * It looks there's one hole here, maybe there could several concurrent
1338 	 * shareed open session, but we never check this case.
1339 	 * But it will not hurt too much, disregard it now.
1340 	 */
1341 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1342 	mutex_exit(&qlt->qlt_ioctl_lock);
1343 
1344 	return (0);
1345 }
1346 
1347 /*
1348  * All of these ioctls are unstable interfaces which are meant to be used
1349  * in a controlled lab env. No formal testing will be (or needs to be) done
1350  * for these ioctls. Specially note that running with an additional
1351  * uploaded firmware is not supported and is provided here for test
1352  * purposes only.
1353  */
1354 /* ARGSUSED */
1355 static int
1356 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1357     cred_t *credp, int *rval)
1358 {
1359 	qlt_state_t	*qlt;
1360 	int		ret = 0;
1361 #ifdef _LITTLE_ENDIAN
1362 	int		i;
1363 #endif
1364 	stmf_iocdata_t	*iocd;
1365 	void		*ibuf = NULL;
1366 	void		*obuf = NULL;
1367 	uint32_t	*intp;
1368 	qlt_fw_info_t	*fwi;
1369 	mbox_cmd_t	*mcp;
1370 	fct_status_t	st;
1371 	char		info[80];
1372 
1373 	if (drv_priv(credp) != 0)
1374 		return (EPERM);
1375 
1376 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1377 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1378 	if (ret)
1379 		return (ret);
1380 	iocd->stmf_error = 0;
1381 
1382 	switch (cmd) {
1383 	case QLT_IOCTL_FETCH_FWDUMP:
1384 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1385 			ret = EINVAL;
1386 			break;
1387 		}
1388 		mutex_enter(&qlt->qlt_ioctl_lock);
1389 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1390 			mutex_exit(&qlt->qlt_ioctl_lock);
1391 			ret = ENODATA;
1392 			iocd->stmf_error = QLTIO_NO_DUMP;
1393 			break;
1394 		}
1395 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1396 			mutex_exit(&qlt->qlt_ioctl_lock);
1397 			ret = EBUSY;
1398 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1399 			break;
1400 		}
1401 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1402 			mutex_exit(&qlt->qlt_ioctl_lock);
1403 			ret = EEXIST;
1404 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1405 			break;
1406 		}
1407 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1408 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1409 		mutex_exit(&qlt->qlt_ioctl_lock);
1410 
1411 		break;
1412 
1413 	case QLT_IOCTL_TRIGGER_FWDUMP:
1414 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
1415 			ret = EACCES;
1416 			iocd->stmf_error = QLTIO_NOT_ONLINE;
1417 			break;
1418 		}
1419 		(void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
1420 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1421 		info[79] = 0;
1422 		if (fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_USER_REQUEST |
1423 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP,
1424 		    info) != FCT_SUCCESS) {
1425 			ret = EIO;
1426 		}
1427 		break;
1428 	case QLT_IOCTL_UPLOAD_FW:
1429 		if ((iocd->stmf_ibuf_size < 1024) ||
1430 		    (iocd->stmf_ibuf_size & 3)) {
1431 			ret = EINVAL;
1432 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1433 			break;
1434 		}
1435 		intp = (uint32_t *)ibuf;
1436 #ifdef _LITTLE_ENDIAN
1437 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1438 			intp[i] = BSWAP_32(intp[i]);
1439 		}
1440 #endif
1441 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1442 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
1443 		    iocd->stmf_ibuf_size)) {
1444 			ret = EINVAL;
1445 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1446 			break;
1447 		}
1448 		if ((qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1449 		    (!qlt->qlt_25xx_chip && ((intp[8] & 3) == 0))) {
1450 			ret = EACCES;
1451 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1452 			break;
1453 		}
1454 
1455 		/* Everything looks ok, lets copy this firmware */
1456 		if (qlt->fw_code01) {
1457 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1458 			    qlt->fw_length02) << 2);
1459 			qlt->fw_code01 = NULL;
1460 		} else {
1461 			atomic_add_32(&qlt_loaded_counter, 1);
1462 		}
1463 		qlt->fw_length01 = intp[3];
1464 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1465 		    KM_SLEEP);
1466 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1467 		qlt->fw_addr01 = intp[2];
1468 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1469 		qlt->fw_addr02 = qlt->fw_code02[2];
1470 		qlt->fw_length02 = qlt->fw_code02[3];
1471 		break;
1472 
1473 	case QLT_IOCTL_CLEAR_FW:
1474 		if (qlt->fw_code01) {
1475 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1476 			    qlt->fw_length02) << 2);
1477 			qlt->fw_code01 = NULL;
1478 			atomic_add_32(&qlt_loaded_counter, -1);
1479 		}
1480 		break;
1481 
1482 	case QLT_IOCTL_GET_FW_INFO:
1483 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1484 			ret = EINVAL;
1485 			break;
1486 		}
1487 		fwi = (qlt_fw_info_t *)obuf;
1488 		if (qlt->qlt_stay_offline) {
1489 			fwi->fwi_stay_offline = 1;
1490 		}
1491 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
1492 			fwi->fwi_port_active = 1;
1493 		}
1494 		fwi->fwi_active_major = qlt->fw_major;
1495 		fwi->fwi_active_minor = qlt->fw_minor;
1496 		fwi->fwi_active_subminor = qlt->fw_subminor;
1497 		fwi->fwi_active_attr = qlt->fw_attr;
1498 		if (qlt->fw_code01) {
1499 			fwi->fwi_fw_uploaded = 1;
1500 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1501 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1502 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1503 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1504 		}
1505 		if (qlt->qlt_25xx_chip) {
1506 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1507 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1508 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1509 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1510 		} else {
1511 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1512 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1513 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1514 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1515 		}
1516 		break;
1517 
1518 	case QLT_IOCTL_STAY_OFFLINE:
1519 		if (!iocd->stmf_ibuf_size) {
1520 			ret = EINVAL;
1521 			break;
1522 		}
1523 		if (*((char *)ibuf)) {
1524 			qlt->qlt_stay_offline = 1;
1525 		} else {
1526 			qlt->qlt_stay_offline = 0;
1527 		}
1528 		break;
1529 
1530 	case QLT_IOCTL_MBOX:
1531 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1532 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1533 			ret = EINVAL;
1534 			break;
1535 		}
1536 		mcp = qlt_alloc_mailbox_command(qlt, 0);
1537 		if (mcp == NULL) {
1538 			ret = ENOMEM;
1539 			break;
1540 		}
1541 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1542 		st = qlt_mailbox_command(qlt, mcp);
1543 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1544 		qlt_free_mailbox_command(qlt, mcp);
1545 		if (st != QLT_SUCCESS) {
1546 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1547 				st = QLT_SUCCESS;
1548 		}
1549 		if (st != QLT_SUCCESS) {
1550 			ret = EIO;
1551 			switch (st) {
1552 			case QLT_MBOX_NOT_INITIALIZED:
1553 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1554 				break;
1555 			case QLT_MBOX_BUSY:
1556 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1557 				break;
1558 			case QLT_MBOX_TIMEOUT:
1559 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1560 				break;
1561 			case QLT_MBOX_ABORTED:
1562 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
1563 				break;
1564 			}
1565 		}
1566 		break;
1567 
1568 	default:
1569 		QLT_LOG(qlt->qlt_port_alias, "qlt_ioctl: ioctl-0x%02X", cmd);
1570 		ret = ENOTTY;
1571 	}
1572 
1573 	if (ret == 0) {
1574 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1575 	} else if (iocd->stmf_error) {
1576 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1577 	}
1578 	if (obuf) {
1579 		kmem_free(obuf, iocd->stmf_obuf_size);
1580 		obuf = NULL;
1581 	}
1582 	if (ibuf) {
1583 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1584 		ibuf = NULL;
1585 	}
1586 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1587 	return (ret);
1588 }
1589 
1590 static void
1591 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1592 {
1593 	stmf_change_status_t		st;
1594 	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
1595 	qlt_state_t			*qlt;
1596 
1597 	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
1598 	    (cmd == FCT_CMD_PORT_OFFLINE) ||
1599 	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
1600 	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));
1601 
1602 	qlt = (qlt_state_t *)port->port_fca_private;
1603 	st.st_completion_status = FCT_SUCCESS;
1604 	st.st_additional_info = NULL;
1605 
1606 	switch (cmd) {
1607 	case FCT_CMD_PORT_ONLINE:
1608 		if (qlt->qlt_state == FCT_STATE_ONLINE)
1609 			st.st_completion_status = STMF_ALREADY;
1610 		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1611 			st.st_completion_status = FCT_FAILURE;
1612 		if (st.st_completion_status == FCT_SUCCESS) {
1613 			qlt->qlt_state = FCT_STATE_ONLINING;
1614 			qlt->qlt_state_not_acked = 1;
1615 			st.st_completion_status = qlt_port_online(qlt);
1616 			if (st.st_completion_status != STMF_SUCCESS) {
1617 				qlt->qlt_state = FCT_STATE_OFFLINE;
1618 				qlt->qlt_state_not_acked = 0;
1619 			} else {
1620 				qlt->qlt_state = FCT_STATE_ONLINE;
1621 			}
1622 		}
1623 		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
1624 		qlt->qlt_change_state_flags = 0;
1625 		break;
1626 
1627 	case FCT_CMD_PORT_OFFLINE:
1628 		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
1629 			st.st_completion_status = STMF_ALREADY;
1630 		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
1631 			st.st_completion_status = FCT_FAILURE;
1632 		}
1633 		if (st.st_completion_status == FCT_SUCCESS) {
1634 			qlt->qlt_state = FCT_STATE_OFFLINING;
1635 			qlt->qlt_state_not_acked = 1;
1636 
1637 			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
1638 				(void) qlt_firmware_dump(port, ssci);
1639 			}
1640 			qlt->qlt_change_state_flags = ssci->st_rflags;
1641 			st.st_completion_status = qlt_port_offline(qlt);
1642 			if (st.st_completion_status != STMF_SUCCESS) {
1643 				qlt->qlt_state = FCT_STATE_ONLINE;
1644 				qlt->qlt_state_not_acked = 0;
1645 			} else {
1646 				qlt->qlt_state = FCT_STATE_OFFLINE;
1647 			}
1648 		}
1649 		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
1650 		break;
1651 
1652 	case FCT_ACK_PORT_ONLINE_COMPLETE:
1653 		qlt->qlt_state_not_acked = 0;
1654 		break;
1655 
1656 	case FCT_ACK_PORT_OFFLINE_COMPLETE:
1657 		qlt->qlt_state_not_acked = 0;
1658 		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
1659 		    (qlt->qlt_stay_offline == 0)) {
1660 			if (fct_port_initialize(port,
1661 			    qlt->qlt_change_state_flags,
1662 			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
1663 			    "with RLFLAG_RESET") != FCT_SUCCESS) {
1664 				cmn_err(CE_WARN, "qlt_ctl: "
1665 				    "fct_port_initialize failed, please use "
1666 				    "stmfstate to start the port-%s manualy",
1667 				    qlt->qlt_port_alias);
1668 			}
1669 		}
1670 		break;
1671 	}
1672 }
1673 
1674 /* ARGSUSED */
1675 static fct_status_t
1676 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
1677 {
1678 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
1679 	return (FCT_FAILURE);
1680 }
1681 
1682 /*
1683  * Return a pointer to n entries in the request queue. Assumes that
1684  * request queue lock is held. Does a very short busy wait if
1685  * less/zero entries are available. Retuns NULL if it still cannot
1686  * fullfill the request.
1687  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
1688  */
1689 caddr_t
1690 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
1691 {
1692 	int try = 0;
1693 
1694 	while (qlt->req_available < n) {
1695 		uint32_t val1, val2, val3;
1696 		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1697 		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1698 		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1699 		if ((val1 != val2) || (val2 != val3))
1700 			continue;
1701 
1702 		qlt->req_ndx_from_fw = val1;
1703 		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
1704 		    ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
1705 		    (REQUEST_QUEUE_ENTRIES - 1));
1706 		if (qlt->req_available < n) {
1707 			if (try < 2) {
1708 				drv_usecwait(100);
1709 				try++;
1710 				continue;
1711 			} else {
1712 				stmf_trace(qlt->qlt_port_alias,
1713 				    "Req Q is full");
1714 				return (NULL);
1715 			}
1716 		}
1717 		break;
1718 	}
1719 	/* We dont change anything until the entries are sumitted */
1720 	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
1721 }
1722 
1723 /*
1724  * updates the req in ptr to fw. Assumes that req lock is held.
1725  */
1726 void
1727 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
1728 {
1729 	ASSERT(n >= 1);
1730 	qlt->req_ndx_to_fw += n;
1731 	qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
1732 	qlt->req_available -= n;
1733 	REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
1734 }
1735 
1736 
1737 /*
1738  * Return a pointer to n entries in the priority request queue. Assumes that
1739  * priority request queue lock is held. Does a very short busy wait if
1740  * less/zero entries are available. Retuns NULL if it still cannot
1741  * fullfill the request.
1742  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
1743  */
1744 caddr_t
1745 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
1746 {
1747 	int try = 0;
1748 	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
1749 	    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
1750 	    (PRIORITY_QUEUE_ENTRIES - 1));
1751 
1752 	while (req_available < n) {
1753 		uint32_t val1, val2, val3;
1754 		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
1755 		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
1756 		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
1757 		if ((val1 != val2) || (val2 != val3))
1758 			continue;
1759 
1760 		qlt->preq_ndx_from_fw = val1;
1761 		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
1762 		    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
1763 		    (PRIORITY_QUEUE_ENTRIES - 1));
1764 		if (req_available < n) {
1765 			if (try < 2) {
1766 				drv_usecwait(100);
1767 				try++;
1768 				continue;
1769 			} else {
1770 				return (NULL);
1771 			}
1772 		}
1773 		break;
1774 	}
1775 	/* We dont change anything until the entries are sumitted */
1776 	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
1777 }
1778 
1779 /*
1780  * updates the req in ptr to fw. Assumes that req lock is held.
1781  */
1782 void
1783 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
1784 {
1785 	ASSERT(n >= 1);
1786 	qlt->preq_ndx_to_fw += n;
1787 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
1788 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
1789 }
1790 
1791 /*
1792  * - Should not be called from Interrupt.
1793  * - A very hardware specific function. Does not touch driver state.
1794  * - Assumes that interrupts are disabled or not there.
1795  * - Expects that the caller makes sure that all activity has stopped
1796  *   and its ok now to go ahead and reset the chip. Also the caller
1797  *   takes care of post reset damage control.
1798  * - called by initialize adapter() and dump_fw(for reset only).
1799  * - During attach() nothing much is happening and during initialize_adapter()
1800  *   the function (caller) does all the housekeeping so that this function
1801  *   can execute in peace.
1802  * - Returns 0 on success.
1803  */
1804 static fct_status_t
1805 qlt_reset_chip_and_download_fw(qlt_state_t *qlt, int reset_only)
1806 {
1807 	int cntr;
1808 	uint32_t start_addr;
1809 	fct_status_t ret;
1810 
1811 	/* XXX: Switch off LEDs */
1812 
1813 	/* Disable Interrupts */
1814 	REG_WR32(qlt, REG_INTR_CTRL, 0);
1815 	(void) REG_RD32(qlt, REG_INTR_CTRL);
1816 	/* Stop DMA */
1817 	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
1818 
1819 	/* Wait for DMA to be stopped */
1820 	cntr = 0;
1821 	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
1822 		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
1823 		cntr++;
1824 		/* 3 sec should be more than enough */
1825 		if (cntr == 300)
1826 			return (QLT_DMA_STUCK);
1827 	}
1828 
1829 	/* Reset the Chip */
1830 	REG_WR32(qlt, REG_CTRL_STATUS,
1831 	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
1832 
1833 	qlt->qlt_link_up = 0;
1834 
1835 	drv_usecwait(100);
1836 
1837 	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
1838 	cntr = 0;
1839 	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
1840 		delay(drv_usectohz(10000));
1841 		cntr++;
1842 		/* 3 sec should be more than enough */
1843 		if (cntr == 300)
1844 			return (QLT_ROM_STUCK);
1845 	}
1846 	/* Disable Interrupts (Probably not needed) */
1847 	REG_WR32(qlt, REG_INTR_CTRL, 0);
1848 	if (reset_only)
1849 		return (QLT_SUCCESS);
1850 
1851 	/* Load the two segments */
1852 	if (qlt->fw_code01 != NULL) {
1853 		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
1854 		    qlt->fw_addr01);
1855 		if (ret == QLT_SUCCESS) {
1856 			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
1857 			    qlt->fw_length02, qlt->fw_addr02);
1858 		}
1859 		start_addr = qlt->fw_addr01;
1860 	} else if (qlt->qlt_25xx_chip) {
1861 		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
1862 		    fw2500_addr01);
1863 		if (ret == QLT_SUCCESS) {
1864 			ret = qlt_load_risc_ram(qlt, fw2500_code02,
1865 			    fw2500_length02, fw2500_addr02);
1866 		}
1867 		start_addr = fw2500_addr01;
1868 	} else {
1869 		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
1870 		    fw2400_addr01);
1871 		if (ret == QLT_SUCCESS) {
1872 			ret = qlt_load_risc_ram(qlt, fw2400_code02,
1873 			    fw2400_length02, fw2400_addr02);
1874 		}
1875 		start_addr = fw2400_addr01;
1876 	}
1877 	if (ret != QLT_SUCCESS)
1878 		return (ret);
1879 
1880 	/* Verify Checksum */
1881 	REG_WR16(qlt, REG_MBOX(0), 7);
1882 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
1883 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
1884 	ret = qlt_raw_mailbox_command(qlt);
1885 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1886 	if (ret != QLT_SUCCESS)
1887 		return (ret);
1888 
1889 	/* Execute firmware */
1890 	REG_WR16(qlt, REG_MBOX(0), 2);
1891 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
1892 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
1893 	REG_WR16(qlt, REG_MBOX(3), 0);
1894 	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
1895 	ret = qlt_raw_mailbox_command(qlt);
1896 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1897 	if (ret != QLT_SUCCESS)
1898 		return (ret);
1899 
1900 	/* Get revisions (About Firmware) */
1901 	REG_WR16(qlt, REG_MBOX(0), 8);
1902 	ret = qlt_raw_mailbox_command(qlt);
1903 	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
1904 	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
1905 	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
1906 	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
1907 	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
1908 	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
1909 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1910 	if (ret != QLT_SUCCESS)
1911 		return (ret);
1912 
1913 	return (QLT_SUCCESS);
1914 }
1915 
1916 /*
1917  * Used only from qlt_reset_chip_and_download_fw().
1918  */
1919 static fct_status_t
1920 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
1921 				uint32_t word_count, uint32_t risc_addr)
1922 {
1923 	uint32_t words_sent = 0;
1924 	uint32_t words_being_sent;
1925 	uint32_t *cur_host_addr;
1926 	uint32_t cur_risc_addr;
1927 	uint64_t da;
1928 	fct_status_t ret;
1929 
1930 	while (words_sent < word_count) {
1931 		cur_host_addr = &(host_addr[words_sent]);
1932 		cur_risc_addr = risc_addr + (words_sent << 2);
1933 		words_being_sent = min(word_count - words_sent,
1934 		    TOTAL_DMA_MEM_SIZE >> 2);
1935 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
1936 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
1937 		    DDI_DEV_AUTOINCR);
1938 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
1939 		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
1940 		da = qlt->queue_mem_cookie.dmac_laddress;
1941 		REG_WR16(qlt, REG_MBOX(0), 0x0B);
1942 		REG_WR16(qlt, REG_MBOX(1), risc_addr & 0xffff);
1943 		REG_WR16(qlt, REG_MBOX(8), ((cur_risc_addr >> 16) & 0xffff));
1944 		REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
1945 		da >>= 16;
1946 		REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
1947 		da >>= 16;
1948 		REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
1949 		da >>= 16;
1950 		REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
1951 		REG_WR16(qlt, REG_MBOX(5), words_being_sent & 0xffff);
1952 		REG_WR16(qlt, REG_MBOX(4), (words_being_sent >> 16) & 0xffff);
1953 		ret = qlt_raw_mailbox_command(qlt);
1954 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1955 		if (ret != QLT_SUCCESS)
1956 			return (ret);
1957 		words_sent += words_being_sent;
1958 	}
1959 	return (QLT_SUCCESS);
1960 }
1961 
1962 /*
1963  * Not used during normal operation. Only during driver init.
1964  * Assumes that interrupts are disabled and mailboxes are loaded.
1965  * Just triggers the mailbox command an waits for the completion.
1966  * Also expects that There is nothing else going on and we will only
1967  * get back a mailbox completion from firmware.
1968  * ---DOES NOT CLEAR INTERRUPT---
1969  * Used only from the code path originating from
1970  * qlt_reset_chip_and_download_fw()
1971  */
1972 static fct_status_t
1973 qlt_raw_mailbox_command(qlt_state_t *qlt)
1974 {
1975 	int cntr = 0;
1976 	uint32_t status;
1977 
1978 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
1979 	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_INTR_REQUEST) == 0) {
1980 		cntr++;
1981 		if (cntr == 100)
1982 			return (QLT_MAILBOX_STUCK);
1983 		delay(drv_usectohz(10000));
1984 	}
1985 	status = (REG_RD32(qlt, REG_RISC_STATUS) & 0xff);
1986 	if ((status == 1) || (status == 2) ||
1987 	    (status == 0x10) || (status == 0x11)) {
1988 		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
1989 		if (mbox0 == 0x4000)
1990 			return (QLT_SUCCESS);
1991 		else
1992 			return (QLT_MBOX_FAILED | mbox0);
1993 	}
1994 	/* This is unexpected, dump a message */
1995 	cmn_err(CE_WARN, "qlt(%d): Unexpect intr status %llx",
1996 	    ddi_get_instance(qlt->dip), (unsigned long long)status);
1997 	return (QLT_UNEXPECTED_RESPONSE);
1998 }
1999 
2000 static mbox_cmd_t *
2001 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
2002 {
2003 	mbox_cmd_t *mcp;
2004 
2005 	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2006 	if (dma_size) {
2007 		qlt_dmem_bctl_t *bctl;
2008 		uint64_t da;
2009 
2010 		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2011 		if (mcp->dbuf == NULL) {
2012 			kmem_free(mcp, sizeof (*mcp));
2013 			return (NULL);
2014 		}
2015 		mcp->dbuf->db_data_size = dma_size;
2016 		ASSERT(mcp->dbuf->db_sglist_length == 1);
2017 
2018 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2019 		da = bctl->bctl_dev_addr;
2020 		/* This is the most common initialization of dma ptrs */
2021 		mcp->to_fw[3] = da & 0xffff;
2022 		da >>= 16;
2023 		mcp->to_fw[2] = da & 0xffff;
2024 		da >>= 16;
2025 		mcp->to_fw[7] = da & 0xffff;
2026 		da >>= 16;
2027 		mcp->to_fw[6] = da & 0xffff;
2028 		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2029 	}
2030 	mcp->to_fw_mask |= BIT_0;
2031 	mcp->from_fw_mask |= BIT_0;
2032 	return (mcp);
2033 }
2034 
2035 void
2036 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2037 {
2038 	if (mcp->dbuf)
2039 		qlt_i_dmem_free(qlt, mcp->dbuf);
2040 	kmem_free(mcp, sizeof (*mcp));
2041 }
2042 
2043 /*
2044  * This can sleep. Should never be called from interrupt context.
2045  */
2046 static fct_status_t
2047 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2048 {
2049 	int	retries;
2050 	int	i;
2051 	char	info[80];
2052 
2053 	if (curthread->t_flag & T_INTR_THREAD) {
2054 		ASSERT(0);
2055 		return (QLT_MBOX_FAILED);
2056 	}
2057 
2058 	mutex_enter(&qlt->mbox_lock);
2059 	/* See if mailboxes are still uninitialized */
2060 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2061 		mutex_exit(&qlt->mbox_lock);
2062 		return (QLT_MBOX_NOT_INITIALIZED);
2063 	}
2064 
2065 	/* Wait to grab the mailboxes */
2066 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2067 	    retries++) {
2068 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2069 		if ((retries > 5) ||
2070 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2071 			mutex_exit(&qlt->mbox_lock);
2072 			return (QLT_MBOX_BUSY);
2073 		}
2074 	}
2075 	/* Make sure we always ask for mailbox 0 */
2076 	mcp->from_fw_mask |= BIT_0;
2077 
2078 	/* Load mailboxes, set state and generate RISC interrupt */
2079 	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2080 	qlt->mcp = mcp;
2081 	for (i = 0; i < MAX_MBOXES; i++) {
2082 		if (mcp->to_fw_mask & ((uint32_t)1 << i))
2083 			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2084 	}
2085 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
2086 
2087 qlt_mbox_wait_loop:;
2088 	/* Wait for mailbox command completion */
2089 	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2090 	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2091 		(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
2092 		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2093 		info[79] = 0;
2094 		qlt->mcp = NULL;
2095 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2096 		mutex_exit(&qlt->mbox_lock);
2097 
2098 		/*
2099 		 * XXX Throw HBA fatal error event
2100 		 */
2101 		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2102 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2103 		return (QLT_MBOX_TIMEOUT);
2104 	}
2105 	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2106 		goto qlt_mbox_wait_loop;
2107 
2108 	qlt->mcp = NULL;
2109 
2110 	/* Make sure its a completion */
2111 	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2112 		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2113 		mutex_exit(&qlt->mbox_lock);
2114 		return (QLT_MBOX_ABORTED);
2115 	}
2116 
2117 	/* MBox command completed. Clear state, retuen based on mbox 0 */
2118 	/* Mailboxes are already loaded by interrupt routine */
2119 	qlt->mbox_io_state = MBOX_STATE_READY;
2120 	mutex_exit(&qlt->mbox_lock);
2121 	if (mcp->from_fw[0] != 0x4000)
2122 		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2123 
2124 	return (QLT_SUCCESS);
2125 }
2126 
2127 /*
2128  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2129  */
2130 /* ARGSUSED */
2131 static uint_t
2132 qlt_isr(caddr_t arg, caddr_t arg2)
2133 {
2134 	qlt_state_t	*qlt = (qlt_state_t *)arg;
2135 	int		instance;
2136 	uint32_t	risc_status, intr_type;
2137 	int		i;
2138 	int		intr_loop_count;
2139 	char		info[80];
2140 
2141 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2142 	if (!mutex_tryenter(&qlt->intr_lock)) {
2143 		/*
2144 		 * Normally we will always get this lock. If tryenter is
2145 		 * failing then it means that driver is trying to do
2146 		 * some cleanup and is masking the intr but some intr
2147 		 * has sneaked in between. See if our device has generated
2148 		 * this intr. If so then wait a bit and return claimed.
2149 		 * If not then return claimed if this is the 1st instance
2150 		 * of a interrupt after driver has grabbed the lock.
2151 		 */
2152 		if (risc_status & BIT_15) {
2153 			drv_usecwait(10);
2154 			return (DDI_INTR_CLAIMED);
2155 		} else if (qlt->intr_sneak_counter) {
2156 			qlt->intr_sneak_counter--;
2157 			return (DDI_INTR_CLAIMED);
2158 		} else {
2159 			return (DDI_INTR_UNCLAIMED);
2160 		}
2161 	}
2162 	if (((risc_status & BIT_15) == 0) ||
2163 	    (qlt->qlt_intr_enabled == 0)) {
2164 		/*
2165 		 * This might be a pure coincedence that we are operating
2166 		 * in a interrupt disabled mode and another device
2167 		 * sharing the interrupt line has generated an interrupt
2168 		 * while an interrupt from our device might be pending. Just
2169 		 * ignore it and let the code handling the interrupt
2170 		 * disabled mode handle it.
2171 		 */
2172 		mutex_exit(&qlt->intr_lock);
2173 		return (DDI_INTR_UNCLAIMED);
2174 	}
2175 
2176 	/*
2177 	 * XXX take care for MSI case. disable intrs
2178 	 * Its gonna be complicated becasue of the max iterations.
2179 	 * as hba will have posted the intr which did not go on PCI
2180 	 * but we did not service it either becasue of max iterations.
2181 	 * Maybe offload the intr on a different thread.
2182 	 */
2183 	instance = ddi_get_instance(qlt->dip);
2184 	intr_loop_count = 0;
2185 
2186 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2187 
2188 intr_again:;
2189 	/* First check for high performance path */
2190 	intr_type = risc_status & 0xff;
2191 	if (intr_type == 0x1C) {
2192 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2193 		qlt->atio_ndx_from_fw = risc_status >> 16;
2194 		qlt_handle_atio_queue_update(qlt);
2195 	} else if (intr_type == 0x13) {
2196 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2197 		qlt->resp_ndx_from_fw = risc_status >> 16;
2198 		qlt_handle_resp_queue_update(qlt);
2199 		/* XXX what about priority queue */
2200 	} else if (intr_type == 0x1D) {
2201 		qlt->atio_ndx_from_fw = REG_RD32(qlt, REG_ATIO_IN_PTR);
2202 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2203 		qlt->resp_ndx_from_fw = risc_status >> 16;
2204 		qlt_handle_atio_queue_update(qlt);
2205 		qlt_handle_resp_queue_update(qlt);
2206 	} else if (intr_type == 0x12) {
2207 		uint16_t code = risc_status >> 16;
2208 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2209 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2210 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2211 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2212 
2213 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2214 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2215 		    " mb5=%x, mb6=%x", code, mbox1, mbox2, mbox5, mbox6);
2216 		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
2217 		    " mb5=%x, mb6=%x", instance, code, mbox1, mbox2, mbox5,
2218 		    mbox6);
2219 
2220 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2221 			if (qlt->qlt_link_up) {
2222 				fct_handle_event(qlt->qlt_port,
2223 				    FCT_EVENT_LINK_RESET, 0, 0);
2224 			}
2225 		} else if (code == 0x8012) {
2226 			qlt->qlt_link_up = 0;
2227 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2228 			    0, 0);
2229 		} else if (code == 0x8011) {
2230 			switch (mbox1) {
2231 			case 0: qlt->link_speed = PORT_SPEED_1G;
2232 				break;
2233 			case 1: qlt->link_speed = PORT_SPEED_2G;
2234 				break;
2235 			case 3: qlt->link_speed = PORT_SPEED_4G;
2236 				break;
2237 			case 4: qlt->link_speed = PORT_SPEED_8G;
2238 				break;
2239 			default:
2240 				qlt->link_speed = PORT_SPEED_UNKNOWN;
2241 			}
2242 			qlt->qlt_link_up = 1;
2243 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2244 			    0, 0);
2245 		} else if (code == 0x8002) {
2246 			(void) snprintf(info, 80,
2247 			    "Got 8002, mb1=%x mb2=%x mb5=%x mb6=%x",
2248 			    mbox1, mbox2, mbox5, mbox6);
2249 			info[79] = 0;
2250 			(void) fct_port_shutdown(qlt->qlt_port,
2251 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2252 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2253 		}
2254 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2255 		/* Handle mailbox completion */
2256 		mutex_enter(&qlt->mbox_lock);
2257 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2258 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2259 			    " when driver wasn't waiting for it %d",
2260 			    instance, qlt->mbox_io_state);
2261 		} else {
2262 			for (i = 0; i < MAX_MBOXES; i++) {
2263 				if (qlt->mcp->from_fw_mask &
2264 				    (((uint32_t)1) << i)) {
2265 					qlt->mcp->from_fw[i] =
2266 					    REG_RD16(qlt, REG_MBOX(i));
2267 				}
2268 			}
2269 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2270 		}
2271 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2272 		cv_broadcast(&qlt->mbox_cv);
2273 		mutex_exit(&qlt->mbox_lock);
2274 	} else {
2275 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2276 		    instance, intr_type);
2277 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2278 	}
2279 
2280 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
2281 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2282 	if ((risc_status & BIT_15) &&
2283 	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2284 		goto intr_again;
2285 	}
2286 
2287 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2288 
2289 	mutex_exit(&qlt->intr_lock);
2290 	return (DDI_INTR_CLAIMED);
2291 }
2292 
2293 /* **************** NVRAM Functions ********************** */
2294 
2295 fct_status_t
2296 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2297 {
2298 	uint32_t	timer;
2299 
2300 	/* Clear access error flag */
2301 	REG_WR32(qlt, REG_CTRL_STATUS,
2302 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2303 
2304 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2305 
2306 	/* Wait for READ cycle to complete. */
2307 	for (timer = 3000; timer; timer--) {
2308 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2309 			break;
2310 		}
2311 		drv_usecwait(10);
2312 	}
2313 	if (timer == 0) {
2314 		return (QLT_FLASH_TIMEOUT);
2315 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2316 		return (QLT_FLASH_ACCESS_ERROR);
2317 	}
2318 
2319 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
2320 
2321 	return (QLT_SUCCESS);
2322 }
2323 
2324 fct_status_t
2325 qlt_read_nvram(qlt_state_t *qlt)
2326 {
2327 	uint32_t		index, addr, chksum;
2328 	uint32_t		val, *ptr;
2329 	fct_status_t		ret;
2330 	qlt_nvram_t		*nv;
2331 	uint64_t		empty_node_name = 0;
2332 
2333 	if (qlt->qlt_25xx_chip) {
2334 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2335 		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2336 	} else {
2337 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2338 		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2339 	}
2340 	mutex_enter(&qlt_global_lock);
2341 
2342 	/* Pause RISC. */
2343 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_RISC_PAUSE);
2344 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2345 
2346 	/* Get NVRAM data and calculate checksum. */
2347 	ptr = (uint32_t *)qlt->nvram;
2348 	chksum = 0;
2349 	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2350 		ret = qlt_read_flash_word(qlt, addr++, &val);
2351 		if (ret != QLT_SUCCESS) {
2352 			mutex_exit(&qlt_global_lock);
2353 			return (ret);
2354 		}
2355 		chksum += val;
2356 		*ptr = LE_32(val);
2357 		ptr++;
2358 	}
2359 
2360 	/* Release RISC Pause */
2361 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_PAUSE);
2362 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2363 
2364 	mutex_exit(&qlt_global_lock);
2365 
2366 	/* Sanity check NVRAM Data */
2367 	nv = qlt->nvram;
2368 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2369 	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
2370 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
2371 		return (QLT_BAD_NVRAM_DATA);
2372 	}
2373 
2374 	/* If node name is zero, hand craft it from port name */
2375 	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
2376 		bcopy(nv->port_name, nv->node_name, 8);
2377 		nv->node_name[0] = nv->node_name[0] & ~BIT_0;
2378 		nv->port_name[0] = nv->node_name[0] | BIT_0;
2379 	}
2380 
2381 	return (QLT_SUCCESS);
2382 }
2383 
2384 uint32_t
2385 qlt_sync_atio_queue(qlt_state_t *qlt)
2386 {
2387 	uint32_t total_ent;
2388 
2389 	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
2390 		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
2391 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2392 		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
2393 		    DDI_DMA_SYNC_FORCPU);
2394 	} else {
2395 		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
2396 		    qlt->atio_ndx_from_fw;
2397 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2398 		    + (qlt->atio_ndx_to_fw << 6), (ATIO_QUEUE_ENTRIES -
2399 		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2400 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2401 		    ATIO_QUEUE_OFFSET,
2402 		    qlt->atio_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2403 	}
2404 	return (total_ent);
2405 }
2406 
2407 void
2408 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2409 {
2410 	uint32_t total_ent;
2411 
2412 	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2413 		return;
2414 
2415 	total_ent = qlt_sync_atio_queue(qlt);
2416 
2417 	do {
2418 		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2419 		    qlt->atio_ndx_to_fw << 6];
2420 		uint32_t ent_cnt;
2421 
2422 		ent_cnt = (uint32_t)(atio[1]);
2423 		if (ent_cnt > total_ent) {
2424 			break;
2425 		}
2426 		switch ((uint8_t)(atio[0])) {
2427 		case 0x0d:	/* INOT */
2428 			qlt_handle_inot(qlt, atio);
2429 			break;
2430 		case 0x06:	/* ATIO */
2431 			qlt_handle_atio(qlt, atio);
2432 			break;
2433 		default:
2434 			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2435 			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2436 			break;
2437 		}
2438 		qlt->atio_ndx_to_fw = (qlt->atio_ndx_to_fw + ent_cnt) &
2439 		    (ATIO_QUEUE_ENTRIES - 1);
2440 		total_ent -= ent_cnt;
2441 	} while (total_ent > 0);
2442 	REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
2443 }
2444 
2445 uint32_t
2446 qlt_sync_resp_queue(qlt_state_t *qlt)
2447 {
2448 	uint32_t total_ent;
2449 
2450 	if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2451 		total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2452 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2453 		    RESPONSE_QUEUE_OFFSET
2454 		    + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2455 		    DDI_DMA_SYNC_FORCPU);
2456 	} else {
2457 		total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2458 		    qlt->resp_ndx_from_fw;
2459 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2460 		    RESPONSE_QUEUE_OFFSET
2461 		    + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2462 		    qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2463 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2464 		    RESPONSE_QUEUE_OFFSET,
2465 		    qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2466 	}
2467 	return (total_ent);
2468 }
2469 
2470 void
2471 qlt_handle_resp_queue_update(qlt_state_t *qlt)
2472 {
2473 	uint32_t total_ent;
2474 	uint8_t c;
2475 
2476 	if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
2477 		return;
2478 
2479 	total_ent = qlt_sync_resp_queue(qlt);
2480 
2481 	do {
2482 		caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
2483 		uint32_t ent_cnt;
2484 
2485 		ent_cnt = (uint32_t)(resp[1]);
2486 		if (ent_cnt > total_ent) {
2487 			break;
2488 		}
2489 		switch ((uint8_t)(resp[0])) {
2490 		case 0x12:	/* CTIO completion */
2491 			qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
2492 			break;
2493 		case 0x0e:	/* NACK */
2494 			/* Do Nothing */
2495 			break;
2496 		case 0x29:	/* CT PassThrough */
2497 			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
2498 			break;
2499 		case 0x33:	/* Abort IO IOCB completion */
2500 			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
2501 			break;
2502 		case 0x51:	/* PUREX */
2503 			qlt_handle_purex(qlt, (uint8_t *)resp);
2504 			break;
2505 		case 0x52:
2506 			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
2507 			break;
2508 		case 0x53:	/* ELS passthrough */
2509 			c = ((uint8_t)resp[0x1f]) >> 5;
2510 			if (c == 0) {
2511 				qlt_handle_sol_els_completion(qlt,
2512 				    (uint8_t *)resp);
2513 			} else if (c == 3) {
2514 				qlt_handle_unsol_els_abort_completion(qlt,
2515 				    (uint8_t *)resp);
2516 			} else {
2517 				qlt_handle_unsol_els_completion(qlt,
2518 				    (uint8_t *)resp);
2519 			}
2520 			break;
2521 		case 0x54:	/* ABTS received */
2522 			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
2523 			break;
2524 		case 0x55:	/* ABTS completion */
2525 			qlt_handle_abts_completion(qlt, (uint8_t *)resp);
2526 			break;
2527 		}
2528 		qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
2529 		    (RESPONSE_QUEUE_ENTRIES - 1);
2530 		total_ent -= ent_cnt;
2531 	} while (total_ent > 0);
2532 	REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
2533 }
2534 
2535 fct_status_t
2536 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
2537 				uint16_t *ret_handle)
2538 {
2539 	fct_status_t ret;
2540 	mbox_cmd_t *mcp;
2541 	uint16_t n;
2542 	uint16_t h;
2543 	uint32_t ent_id;
2544 	uint8_t *p;
2545 	int found = 0;
2546 
2547 	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
2548 	if (mcp == NULL) {
2549 		return (STMF_ALLOC_FAILURE);
2550 	}
2551 	mcp->to_fw[0] = 0x7C;	/* GET ID LIST */
2552 	mcp->to_fw[8] = 2048 * 8;
2553 	mcp->to_fw_mask |= BIT_8;
2554 	mcp->from_fw_mask |= BIT_1 | BIT_2;
2555 
2556 	ret = qlt_mailbox_command(qlt, mcp);
2557 	if (ret != QLT_SUCCESS) {
2558 		cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
2559 		    "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
2560 		    mcp->from_fw[1], mcp->from_fw[2]);
2561 		qlt_free_mailbox_command(qlt, mcp);
2562 		return (ret);
2563 	}
2564 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
2565 	p = mcp->dbuf->db_sglist[0].seg_addr;
2566 	for (n = 0; n < mcp->from_fw[1]; n++) {
2567 		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
2568 		h = (uint16_t)p[4] | (((uint16_t)p[5]) << 8);
2569 		if (ent_id == id) {
2570 			found = 1;
2571 			*ret_handle = h;
2572 			if ((cmd_handle != FCT_HANDLE_NONE) &&
2573 			    (cmd_handle != h)) {
2574 				cmn_err(CE_WARN, "login for portid %x came in "
2575 				    "with handle %x, while the portid was "
2576 				    "already using a different handle %x",
2577 				    id, cmd_handle, h);
2578 				qlt_free_mailbox_command(qlt, mcp);
2579 				return (QLT_FAILURE);
2580 			}
2581 			break;
2582 		}
2583 		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
2584 			cmn_err(CE_WARN, "login for portid %x came in with "
2585 			    "handle %x, while the handle was already in use "
2586 			    "for portid %x", id, cmd_handle, ent_id);
2587 			qlt_free_mailbox_command(qlt, mcp);
2588 			return (QLT_FAILURE);
2589 		}
2590 		p += 8;
2591 	}
2592 	if (!found) {
2593 		*ret_handle = cmd_handle;
2594 	}
2595 	qlt_free_mailbox_command(qlt, mcp);
2596 	return (FCT_SUCCESS);
2597 }
2598 
2599 /* ARGSUSED */
2600 fct_status_t
2601 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
2602 				fct_cmd_t *login)
2603 {
2604 	uint8_t *p;
2605 
2606 	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
2607 	p[0] = ELS_OP_PLOGI;
2608 	*((uint16_t *)(&p[4])) = 0x2020;
2609 	p[7] = 3;
2610 	p[8] = 0x88;
2611 	p[10] = 8;
2612 	p[13] = 0xff; p[15] = 0x1f;
2613 	p[18] = 7; p[19] = 0xd0;
2614 
2615 	bcopy(port->port_pwwn, p + 20, 8);
2616 	bcopy(port->port_nwwn, p + 28, 8);
2617 
2618 	p[68] = 0x80;
2619 	p[74] = 8;
2620 	p[77] = 0xff;
2621 	p[81] = 1;
2622 
2623 	return (FCT_SUCCESS);
2624 }
2625 
2626 /* ARGSUSED */
2627 fct_status_t
2628 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
2629 				fct_cmd_t *login)
2630 {
2631 	return (FCT_SUCCESS);
2632 }
2633 
2634 fct_status_t
2635 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
2636 				fct_cmd_t *login)
2637 {
2638 	uint16_t h;
2639 	fct_status_t ret;
2640 
2641 	switch (rp->rp_id) {
2642 	case 0xFFFFFC:	h = 0x7FC; break;
2643 	case 0xFFFFFD:	h = 0x7FD; break;
2644 	case 0xFFFFFE:	h = 0x7FE; break;
2645 	case 0xFFFFFF:	h = 0x7FF; break;
2646 	default:
2647 		ret = qlt_portid_to_handle(
2648 		    (qlt_state_t *)port->port_fca_private, rp->rp_id,
2649 		    login->cmd_rp_handle, &h);
2650 		if (ret != FCT_SUCCESS)
2651 			return (ret);
2652 	}
2653 
2654 	if (login->cmd_type == FCT_CMD_SOL_ELS) {
2655 		ret = qlt_fill_plogi_req(port, rp, login);
2656 	} else {
2657 		ret = qlt_fill_plogi_resp(port, rp, login);
2658 	}
2659 
2660 	if (ret != FCT_SUCCESS)
2661 		return (ret);
2662 
2663 	if (h == FCT_HANDLE_NONE)
2664 		return (FCT_SUCCESS);
2665 
2666 	if (rp->rp_handle == FCT_HANDLE_NONE) {
2667 		rp->rp_handle = h;
2668 		return (FCT_SUCCESS);
2669 	}
2670 
2671 	if (rp->rp_handle == h)
2672 		return (FCT_SUCCESS);
2673 
2674 	return (FCT_FAILURE);
2675 }
2676 /* invoked in single thread */
2677 fct_status_t
2678 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
2679 {
2680 	uint8_t *req;
2681 	qlt_state_t *qlt;
2682 	clock_t	dereg_req_timer;
2683 	fct_status_t ret;
2684 
2685 	qlt = (qlt_state_t *)port->port_fca_private;
2686 
2687 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
2688 	    (qlt->qlt_state == FCT_STATE_OFFLINING))
2689 		return (FCT_SUCCESS);
2690 	ASSERT(qlt->rp_id_in_dereg == 0);
2691 
2692 	mutex_enter(&qlt->preq_lock);
2693 	req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
2694 	if (req == NULL) {
2695 		mutex_exit(&qlt->preq_lock);
2696 		return (FCT_BUSY);
2697 	}
2698 	bzero(req, IOCB_SIZE);
2699 	req[0] = 0x52; req[1] = 1;
2700 	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff);  */
2701 	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
2702 	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
2703 	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
2704 	qlt->rp_id_in_dereg = rp->rp_id;
2705 	qlt_submit_preq_entries(qlt, 1);
2706 
2707 	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
2708 	if (cv_timedwait(&qlt->rp_dereg_cv,
2709 	    &qlt->preq_lock, dereg_req_timer) > 0) {
2710 		ret = qlt->rp_dereg_status;
2711 	} else {
2712 		ret = FCT_BUSY;
2713 	}
2714 	qlt->rp_dereg_status = 0;
2715 	qlt->rp_id_in_dereg = 0;
2716 	mutex_exit(&qlt->preq_lock);
2717 	return (ret);
2718 }
2719 
2720 /*
2721  * Pass received ELS up to framework.
2722  */
2723 static void
2724 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
2725 {
2726 	fct_cmd_t		*cmd;
2727 	fct_els_t		*els;
2728 	qlt_cmd_t		*qcmd;
2729 	uint32_t		payload_size;
2730 	uint32_t		remote_portid;
2731 	uint8_t			*pldptr, *bndrptr;
2732 	int			i, off;
2733 	uint16_t		iocb_flags;
2734 	char			info[160];
2735 
2736 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
2737 	    ((uint32_t)(resp[0x1A])) << 16;
2738 	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
2739 	if (iocb_flags & BIT_15) {
2740 		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
2741 	} else {
2742 		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
2743 	}
2744 
2745 	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
2746 		cmn_err(CE_WARN, "handle_purex: payload is too large");
2747 		goto cmd_null;
2748 	}
2749 
2750 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS, payload_size +
2751 	    GET_STRUCT_SIZE(qlt_cmd_t), 0);
2752 	if (cmd == NULL) {
2753 cmd_null:;
2754 		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
2755 		    "allocate space for fct_cmd", (void *)qlt);
2756 		info[159] = 0;
2757 		(void) fct_port_shutdown(qlt->qlt_port,
2758 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
2759 		return;
2760 	}
2761 
2762 	cmd->cmd_port = qlt->qlt_port;
2763 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
2764 	if (cmd->cmd_rp_handle == 0xFFFF) {
2765 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
2766 	}
2767 
2768 	els = (fct_els_t *)cmd->cmd_specific;
2769 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
2770 	els->els_req_size = payload_size;
2771 	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
2772 	    GET_STRUCT_SIZE(qlt_cmd_t));
2773 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
2774 	cmd->cmd_rportid = remote_portid;
2775 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
2776 	    ((uint32_t)(resp[0x16])) << 16;
2777 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
2778 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
2779 	pldptr = &resp[0x2C];
2780 	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
2781 	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
2782 		/* Take care of fw's swapping of payload */
2783 		els->els_req_payload[i] = pldptr[3];
2784 		els->els_req_payload[i+1] = pldptr[2];
2785 		els->els_req_payload[i+2] = pldptr[1];
2786 		els->els_req_payload[i+3] = pldptr[0];
2787 		pldptr += 4;
2788 		if (pldptr == bndrptr)
2789 			pldptr = (uint8_t *)qlt->resp_ptr;
2790 		off += 4;
2791 		if (off >= IOCB_SIZE) {
2792 			off = 4;
2793 			pldptr += 4;
2794 		}
2795 	}
2796 	fct_post_rcvd_cmd(cmd, 0);
2797 }
2798 
2799 fct_status_t
2800 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
2801 {
2802 	qlt_state_t	*qlt;
2803 	char		info[160];
2804 
2805 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
2806 
2807 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
2808 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
2809 			goto fatal_panic;
2810 		} else {
2811 			return (qlt_send_status(qlt, cmd));
2812 		}
2813 	}
2814 
2815 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
2816 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
2817 			goto fatal_panic;
2818 		} else {
2819 			return (qlt_send_els_response(qlt, cmd));
2820 		}
2821 	}
2822 
2823 	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
2824 		cmd->cmd_handle = 0;
2825 	}
2826 
2827 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
2828 		return (qlt_send_abts_response(qlt, cmd, 0));
2829 	} else {
2830 		ASSERT(0);
2831 		return (FCT_FAILURE);
2832 	}
2833 
2834 fatal_panic:;
2835 	(void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
2836 	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
2837 	    ioflags);
2838 	info[159] = 0;
2839 	(void) fct_port_shutdown(qlt->qlt_port,
2840 	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
2841 	return (FCT_FAILURE);
2842 }
2843 
2844 /* ARGSUSED */
2845 fct_status_t
2846 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
2847 {
2848 	qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
2849 	qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
2850 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
2851 	uint8_t *req;
2852 	uint16_t flags;
2853 
2854 	if (dbuf->db_handle == 0)
2855 		qcmd->dbuf = dbuf;
2856 	flags = ((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5;
2857 	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
2858 		flags |= 2;
2859 		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
2860 	} else {
2861 		flags |= 1;
2862 	}
2863 
2864 	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
2865 		flags |= BIT_15;
2866 
2867 	mutex_enter(&qlt->req_lock);
2868 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
2869 	if (req == NULL) {
2870 		mutex_exit(&qlt->req_lock);
2871 		return (FCT_BUSY);
2872 	}
2873 	bzero(req, IOCB_SIZE);
2874 	req[0] = 0x12; req[1] = 0x1;
2875 	req[2] = dbuf->db_handle;
2876 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
2877 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
2878 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
2879 	req[12] = 1;
2880 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
2881 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
2882 	QMEM_WR16(qlt, req+0x1A, flags);
2883 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
2884 	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
2885 	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
2886 	QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
2887 	QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
2888 	qlt_submit_req_entries(qlt, 1);
2889 	mutex_exit(&qlt->req_lock);
2890 
2891 	return (STMF_SUCCESS);
2892 }
2893 
2894 /*
2895  * We must construct proper FCP_RSP_IU now. Here we only focus on
2896  * the handling of FCP_SNS_INFO. If there's protocol failures (FCP_RSP_INFO),
2897  * we could have catched them before we enter here.
2898  */
2899 fct_status_t
2900 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
2901 {
2902 	qlt_cmd_t *qcmd		= (qlt_cmd_t *)cmd->cmd_fca_private;
2903 	scsi_task_t *task	= (scsi_task_t *)cmd->cmd_specific;
2904 	qlt_dmem_bctl_t *bctl;
2905 	uint32_t size;
2906 	uint8_t *req, *fcp_rsp_iu;
2907 	uint8_t *psd, sensbuf[24];		/* sense data */
2908 	uint16_t flags;
2909 	uint16_t scsi_status;
2910 	int use_mode2;
2911 	int ndx;
2912 
2913 	/*
2914 	 * Enter fast channel for non check condition
2915 	 */
2916 	if (task->task_scsi_status != STATUS_CHECK) {
2917 		/*
2918 		 * We will use mode1
2919 		 */
2920 		flags = BIT_6 | BIT_15 |
2921 		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
2922 		scsi_status = (uint16_t)task->task_scsi_status;
2923 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
2924 			scsi_status |= BIT_10;
2925 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
2926 			scsi_status |= BIT_11;
2927 		}
2928 		qcmd->dbuf_rsp_iu = NULL;
2929 
2930 		/*
2931 		 * Fillout CTIO type 7 IOCB
2932 		 */
2933 		mutex_enter(&qlt->req_lock);
2934 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
2935 		if (req == NULL) {
2936 			mutex_exit(&qlt->req_lock);
2937 			return (FCT_BUSY);
2938 		}
2939 
2940 		/*
2941 		 * Common fields
2942 		 */
2943 		bzero(req, IOCB_SIZE);
2944 		req[0x00] = 0x12;
2945 		req[0x01] = 0x1;
2946 		req[0x02] = BIT_7;	/* indicate if it's a pure status req */
2947 		QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
2948 		QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
2949 		QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
2950 		QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
2951 
2952 		/*
2953 		 * Mode-specific fields
2954 		 */
2955 		QMEM_WR16(qlt, req + 0x1A, flags);
2956 		QMEM_WR32(qlt, req + 0x1C, task->task_resid);
2957 		QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
2958 		QMEM_WR16(qlt, req + 0x22, scsi_status);
2959 
2960 		/*
2961 		 * Trigger FW to send SCSI status out
2962 		 */
2963 		qlt_submit_req_entries(qlt, 1);
2964 		mutex_exit(&qlt->req_lock);
2965 		return (STMF_SUCCESS);
2966 	}
2967 
2968 	ASSERT(task->task_scsi_status == STATUS_CHECK);
2969 	/*
2970 	 * Decide the SCSI status mode, that should be used
2971 	 */
2972 	use_mode2 = (task->task_sense_length > 24);
2973 
2974 	/*
2975 	 * Prepare required information per the SCSI status mode
2976 	 */
2977 	flags = BIT_15 | (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
2978 	if (use_mode2) {
2979 		flags |= BIT_7;
2980 
2981 		size = task->task_sense_length;
2982 		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
2983 		    task->task_sense_length, &size, 0);
2984 		if (!qcmd->dbuf_rsp_iu) {
2985 			return (FCT_ALLOC_FAILURE);
2986 		}
2987 
2988 		/*
2989 		 * Start to construct FCP_RSP IU
2990 		 */
2991 		fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
2992 		bzero(fcp_rsp_iu, 24);
2993 
2994 		/*
2995 		 * FCP_RSP IU flags, byte10
2996 		 */
2997 		fcp_rsp_iu[10] |= BIT_1;
2998 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
2999 			fcp_rsp_iu[10] |= BIT_2;
3000 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3001 			fcp_rsp_iu[10] |= BIT_3;
3002 		}
3003 
3004 		/*
3005 		 * SCSI status code, byte11
3006 		 */
3007 		fcp_rsp_iu[11] = task->task_scsi_status;
3008 
3009 		/*
3010 		 * FCP_RESID (Overrun or underrun)
3011 		 */
3012 		fcp_rsp_iu[12] = (task->task_resid >> 24) & 0xFF;
3013 		fcp_rsp_iu[13] = (task->task_resid >> 16) & 0xFF;
3014 		fcp_rsp_iu[14] = (task->task_resid >>  8) & 0xFF;
3015 		fcp_rsp_iu[15] = (task->task_resid >>  0) & 0xFF;
3016 
3017 		/*
3018 		 * FCP_SNS_LEN
3019 		 */
3020 		fcp_rsp_iu[18] = (task->task_sense_length >> 8) & 0xFF;
3021 		fcp_rsp_iu[19] = (task->task_sense_length >> 0) & 0xFF;
3022 
3023 		/*
3024 		 * FCP_RSP_LEN
3025 		 */
3026 		/*
3027 		 * no FCP_RSP_INFO
3028 		 */
3029 		/*
3030 		 * FCP_SNS_INFO
3031 		 */
3032 		bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3033 		    task->task_sense_length);
3034 
3035 		/*
3036 		 * Ensure dma data consistency
3037 		 */
3038 		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3039 	} else {
3040 		flags |= BIT_6;
3041 
3042 		scsi_status = (uint16_t)task->task_scsi_status;
3043 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3044 			scsi_status |= BIT_10;
3045 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3046 			scsi_status |= BIT_11;
3047 		}
3048 		if (task->task_sense_length) {
3049 			scsi_status |= BIT_9;
3050 		}
3051 		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3052 		qcmd->dbuf_rsp_iu = NULL;
3053 	}
3054 
3055 	/*
3056 	 * Fillout CTIO type 7 IOCB
3057 	 */
3058 	mutex_enter(&qlt->req_lock);
3059 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3060 	if (req == NULL) {
3061 		mutex_exit(&qlt->req_lock);
3062 		if (use_mode2) {
3063 			qlt_dmem_free(cmd->cmd_port->port_fds,
3064 			    qcmd->dbuf_rsp_iu);
3065 			qcmd->dbuf_rsp_iu = NULL;
3066 		}
3067 		return (FCT_BUSY);
3068 	}
3069 
3070 	/*
3071 	 * Common fields
3072 	 */
3073 	bzero(req, IOCB_SIZE);
3074 	req[0x00] = 0x12;
3075 	req[0x01] = 0x1;
3076 	req[0x02] = BIT_7;	/* to indicate if it's a pure status req */
3077 	QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3078 	QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3079 	QMEM_WR16(qlt, req + 0x0A, 0);	/* not timed by FW */
3080 	if (use_mode2) {
3081 		QMEM_WR16(qlt, req+0x0C, 1);	/* FCP RSP IU data field */
3082 	}
3083 	QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3084 	QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3085 
3086 	/*
3087 	 * Mode-specific fields
3088 	 */
3089 	if (!use_mode2) {
3090 		QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
3091 	}
3092 	QMEM_WR16(qlt, req + 0x1A, flags);
3093 	QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3094 	QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3095 	if (use_mode2) {
3096 		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3097 		QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3098 		QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3099 		QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
3100 	} else {
3101 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3102 		psd = req+0x28;
3103 
3104 		/*
3105 		 * Data in sense buf is always big-endian, data in IOCB
3106 		 * should always be little-endian, so we must do swapping.
3107 		 */
3108 		size = ((task->task_sense_length + 3) & (~3));
3109 		for (ndx = 0; ndx < size; ndx += 4) {
3110 			psd[ndx + 0] = sensbuf[ndx + 3];
3111 			psd[ndx + 1] = sensbuf[ndx + 2];
3112 			psd[ndx + 2] = sensbuf[ndx + 1];
3113 			psd[ndx + 3] = sensbuf[ndx + 0];
3114 		}
3115 	}
3116 
3117 	/*
3118 	 * Trigger FW to send SCSI status out
3119 	 */
3120 	qlt_submit_req_entries(qlt, 1);
3121 	mutex_exit(&qlt->req_lock);
3122 
3123 	return (STMF_SUCCESS);
3124 }
3125 
3126 fct_status_t
3127 qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
3128 {
3129 	qlt_cmd_t	*qcmd;
3130 	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
3131 	uint8_t *req, *addr;
3132 	qlt_dmem_bctl_t *bctl;
3133 	uint32_t minsize;
3134 	uint8_t elsop, req1f;
3135 
3136 	addr = els->els_resp_payload;
3137 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3138 
3139 	minsize = els->els_resp_size;
3140 	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
3141 	if (qcmd->dbuf == NULL)
3142 		return (FCT_BUSY);
3143 
3144 	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;
3145 
3146 	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
3147 	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);
3148 
3149 	if (addr[0] == 0x02) {	/* ACC */
3150 		req1f = BIT_5;
3151 	} else {
3152 		req1f = BIT_6;
3153 	}
3154 	elsop = els->els_req_payload[0];
3155 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
3156 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
3157 		req1f |= BIT_4;
3158 	}
3159 
3160 	mutex_enter(&qlt->req_lock);
3161 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3162 	if (req == NULL) {
3163 		mutex_exit(&qlt->req_lock);
3164 		qlt_dmem_free(NULL, qcmd->dbuf);
3165 		qcmd->dbuf = NULL;
3166 		return (FCT_BUSY);
3167 	}
3168 	bzero(req, IOCB_SIZE);
3169 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
3170 	req[0x16] = elsop; req[0x1f] = req1f;
3171 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3172 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3173 	QMEM_WR16(qlt, (&req[0xC]), 1);
3174 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
3175 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
3176 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
3177 		req[0x1b] = (cmd->cmd_lportid >> 16) & 0xff;
3178 		req[0x1c] = cmd->cmd_lportid & 0xff;
3179 		req[0x1d] = (cmd->cmd_lportid >> 8) & 0xff;
3180 	}
3181 	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
3182 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
3183 	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
3184 	qlt_submit_req_entries(qlt, 1);
3185 	mutex_exit(&qlt->req_lock);
3186 
3187 	return (FCT_SUCCESS);
3188 }
3189 
3190 fct_status_t
3191 qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
3192 {
3193 	qlt_abts_cmd_t *qcmd;
3194 	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
3195 	uint8_t *req;
3196 	uint32_t lportid;
3197 	uint32_t fctl;
3198 	int i;
3199 
3200 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3201 
3202 	mutex_enter(&qlt->req_lock);
3203 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3204 	if (req == NULL) {
3205 		mutex_exit(&qlt->req_lock);
3206 		return (FCT_BUSY);
3207 	}
3208 	bcopy(qcmd->buf, req, IOCB_SIZE);
3209 	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
3210 	fctl = QMEM_RD32(qlt, req+0x1C);
3211 	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
3212 	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
3213 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3214 	if (cmd->cmd_rp)
3215 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3216 	else
3217 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
3218 	if (terminate) {
3219 		QMEM_WR16(qlt, (&req[0xC]), 1);
3220 	}
3221 	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
3222 	req[0x17] = abts->abts_resp_rctl;
3223 	QMEM_WR32(qlt, req+0x18, lportid);
3224 	QMEM_WR32(qlt, req+0x1C, fctl);
3225 	req[0x23]++;
3226 	for (i = 0; i < 12; i += 4) {
3227 		/* Take care of firmware's LE requirement */
3228 		req[0x2C+i] = abts->abts_resp_payload[i+3];
3229 		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
3230 		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
3231 		req[0x2C+i+3] = abts->abts_resp_payload[i];
3232 	}
3233 	qlt_submit_req_entries(qlt, 1);
3234 	mutex_exit(&qlt->req_lock);
3235 
3236 	return (FCT_SUCCESS);
3237 }
3238 
3239 static void
3240 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3241 {
3242 	int i;
3243 	uint32_t d;
3244 	caddr_t req;
3245 	/* Just put it on the request queue */
3246 	mutex_enter(&qlt->req_lock);
3247 	req = qlt_get_req_entries(qlt, 1);
3248 	if (req == NULL) {
3249 		mutex_exit(&qlt->req_lock);
3250 		/* XXX handle this */
3251 		return;
3252 	}
3253 	for (i = 0; i < 16; i++) {
3254 		d = QMEM_RD32(qlt, inot);
3255 		inot += 4;
3256 		QMEM_WR32(qlt, req, d);
3257 		req += 4;
3258 	}
3259 	req -= 64;
3260 	req[0] = 0x0e;
3261 	qlt_submit_req_entries(qlt, 1);
3262 	mutex_exit(&qlt->req_lock);
3263 }
3264 
3265 uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3266 static void
3267 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3268 {
3269 	fct_cmd_t	*cmd;
3270 	scsi_task_t	*task;
3271 	qlt_cmd_t	*qcmd;
3272 	uint32_t	rportid, fw_xchg_addr;
3273 	uint8_t		*p, *q, *req, tm;
3274 	uint16_t	cdb_size, flags, oxid;
3275 	char		info[160];
3276 
3277 	/*
3278 	 * If either bidirection xfer is requested of there is extended
3279 	 * CDB, atio[0x20 + 11] will be greater than or equal to 3.
3280 	 */
3281 	cdb_size = 16;
3282 	if (atio[0x20 + 11] >= 3) {
3283 		uint8_t b = atio[0x20 + 11];
3284 		uint16_t b1;
3285 		if ((b & 3) == 3) {
3286 			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
3287 			    "received, dropping the cmd as bidirectional "
3288 			    " transfers are not yet supported", qlt->instance);
3289 			/* XXX abort the I/O */
3290 			return;
3291 		}
3292 		cdb_size += b & 0xfc;
3293 		/*
3294 		 * Verify that we have enough entries. Without additional CDB
3295 		 * Everything will fit nicely within the same 64 bytes. So the
3296 		 * additional cdb size is essentially the # of additional bytes
3297 		 * we need.
3298 		 */
3299 		b1 = (uint16_t)b;
3300 		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
3301 			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
3302 			    " cdb (cdb size = %d bytes), however the firmware "
3303 			    " did not DMAed the entire FCP_CMD IU, entry count "
3304 			    " is %d while it should be %d", qlt->instance,
3305 			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3306 			/* XXX abort the I/O */
3307 			return;
3308 		}
3309 	}
3310 
3311 	rportid = (((uint32_t)atio[8 + 5]) << 16) |
3312 	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3313 	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3314 	oxid = (((uint16_t)atio[8 + 16]) << 8) | atio[8+17];
3315 
3316 	if (fw_xchg_addr == 0xFFFFFFFF) {
3317 		cmd = NULL;
3318 	} else {
3319 		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3320 		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3321 	}
3322 	if (cmd == NULL) {
3323 		/* Abort this IO */
3324 		flags = BIT_14 | ((atio[3] & 0xF0) << 5);
3325 
3326 		mutex_enter(&qlt->req_lock);
3327 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3328 		if (req == NULL) {
3329 			mutex_exit(&qlt->req_lock);
3330 
3331 			(void) snprintf(info, 160,
3332 			    "qlt_handle_atio: qlt-%p, can't "
3333 			    "allocate space for scsi_task", (void *)qlt);
3334 			info[159] = 0;
3335 			(void) fct_port_shutdown(qlt->qlt_port,
3336 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3337 			return;
3338 		}
3339 		bzero(req, IOCB_SIZE);
3340 		req[0] = 0x12; req[1] = 0x1;
3341 		QMEM_WR32(qlt, req+4, 0);
3342 		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3343 		    rportid));
3344 		QMEM_WR16(qlt, req+10, 60);
3345 		QMEM_WR32(qlt, req+0x10, rportid);
3346 		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3347 		QMEM_WR16(qlt, req+0x1A, flags);
3348 		QMEM_WR16(qlt, req+0x20, oxid);
3349 		qlt_submit_req_entries(qlt, 1);
3350 		mutex_exit(&qlt->req_lock);
3351 
3352 		return;
3353 	}
3354 
3355 	task = (scsi_task_t *)cmd->cmd_specific;
3356 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3357 	qcmd->fw_xchg_addr = fw_xchg_addr;
3358 	qcmd->param.atio_byte3 = atio[3];
3359 	cmd->cmd_oxid = oxid;
3360 	cmd->cmd_rxid = (((uint16_t)atio[8 + 18]) << 8) | atio[8+19];
3361 	cmd->cmd_rportid = rportid;
3362 	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
3363 	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
3364 	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3365 	/* Dont do a 64 byte read as this is IOMMU */
3366 	q = atio+0x28;
3367 	/* XXX Handle fcp_cntl */
3368 	task->task_cmd_seq_no = (uint32_t)(*q++);
3369 	task->task_csn_size = 8;
3370 	task->task_flags = qlt_task_flags[(*q++) & 7];
3371 	tm = *q++;
3372 	if (tm) {
3373 		if (tm & BIT_1)
3374 			task->task_mgmt_function = TM_ABORT_TASK_SET;
3375 		else if (tm & BIT_2)
3376 			task->task_mgmt_function = TM_CLEAR_TASK_SET;
3377 		else if (tm & BIT_4)
3378 			task->task_mgmt_function = TM_LUN_RESET;
3379 		else if (tm & BIT_5)
3380 			task->task_mgmt_function = TM_TARGET_COLD_RESET;
3381 		else if (tm & BIT_6)
3382 			task->task_mgmt_function = TM_CLEAR_ACA;
3383 		else
3384 			task->task_mgmt_function = TM_ABORT_TASK;
3385 	}
3386 	task->task_max_nbufs = STMF_BUFS_MAX;
3387 	task->task_csn_size = 8;
3388 	task->task_flags |= ((*q++) & 3) << 5;
3389 	p = task->task_cdb;
3390 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3391 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3392 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3393 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3394 	if (cdb_size > 16) {
3395 		uint16_t xtra = cdb_size - 16;
3396 		uint16_t i;
3397 		uint8_t cb[4];
3398 
3399 		while (xtra) {
3400 			*p++ = *q++;
3401 			xtra--;
3402 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3403 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3404 				q = (uint8_t *)qlt->queue_mem_ptr +
3405 				    ATIO_QUEUE_OFFSET;
3406 			}
3407 		}
3408 		for (i = 0; i < 4; i++) {
3409 			cb[i] = *q++;
3410 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3411 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3412 				q = (uint8_t *)qlt->queue_mem_ptr +
3413 				    ATIO_QUEUE_OFFSET;
3414 			}
3415 		}
3416 		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
3417 		    (((uint32_t)cb[1]) << 16) |
3418 		    (((uint32_t)cb[2]) << 8) | cb[3];
3419 	} else {
3420 		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
3421 		    (((uint32_t)q[1]) << 16) |
3422 		    (((uint32_t)q[2]) << 8) | q[3];
3423 	}
3424 	fct_post_rcvd_cmd(cmd, 0);
3425 }
3426 
3427 static void
3428 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
3429 {
3430 	uint16_t status;
3431 	uint32_t portid;
3432 	uint32_t subcode1, subcode2;
3433 
3434 	status = QMEM_RD16(qlt, rsp+8);
3435 	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
3436 	subcode1 = QMEM_RD32(qlt, rsp+0x14);
3437 	subcode2 = QMEM_RD32(qlt, rsp+0x18);
3438 
3439 	mutex_enter(&qlt->preq_lock);
3440 	if (portid != qlt->rp_id_in_dereg) {
3441 		int instance = ddi_get_instance(qlt->dip);
3442 		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
3443 		    " received when driver wasn't waiting for it",
3444 		    instance, portid);
3445 		mutex_exit(&qlt->preq_lock);
3446 		return;
3447 	}
3448 
3449 	if (status != 0) {
3450 		QLT_LOG(qlt->qlt_port_alias, "implicit logout completed "
3451 		    "for 0x%x with status %x, subcode1 %x subcode2 %x",
3452 		    portid, status, subcode1, subcode2);
3453 		if (status == 0x31 && subcode1 == 0x0a)
3454 			qlt->rp_dereg_status = FCT_SUCCESS;
3455 		else
3456 			qlt->rp_dereg_status =
3457 			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
3458 	} else {
3459 		qlt->rp_dereg_status = FCT_SUCCESS;
3460 	}
3461 	cv_signal(&qlt->rp_dereg_cv);
3462 	mutex_exit(&qlt->preq_lock);
3463 }
3464 
3465 /*
3466  * Note that when an ELS is aborted, the regular or aborted completion
3467  * (if any) gets posted before the abort IOCB comes back on response queue.
3468  */
3469 static void
3470 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
3471 {
3472 	char		info[160];
3473 	fct_cmd_t	*cmd;
3474 	qlt_cmd_t	*qcmd;
3475 	uint32_t	hndl;
3476 	uint32_t	subcode1, subcode2;
3477 	uint16_t	status;
3478 
3479 	hndl = QMEM_RD32(qlt, rsp+4);
3480 	status = QMEM_RD16(qlt, rsp+8);
3481 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3482 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3483 
3484 	if (!CMD_HANDLE_VALID(hndl)) {
3485 		/*
3486 		 * This cannot happen for unsol els completion. This can
3487 		 * only happen when abort for an unsol els completes.
3488 		 * This condition indicates a firmware bug.
3489 		 */
3490 		(void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
3491 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3492 		    hndl, status, subcode1, subcode2, (void *)rsp);
3493 		info[159] = 0;
3494 		(void) fct_port_shutdown(qlt->qlt_port,
3495 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3496 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3497 		return;
3498 	}
3499 
3500 	if (status == 5) {
3501 		/*
3502 		 * When an unsolicited els is aborted, the abort is done
3503 		 * by a ELSPT iocb with abort control. This is the aborted IOCB
3504 		 * and not the abortee. We will do the cleanup when the
3505 		 * IOCB which caused the abort, returns.
3506 		 */
3507 		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
3508 		return;
3509 	}
3510 
3511 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3512 	if (cmd == NULL) {
3513 		/*
3514 		 * Now why would this happen ???
3515 		 */
3516 		(void) snprintf(info, 160,
3517 		    "qlt_handle_unsol_els_completion: can not "
3518 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3519 		    (void *)rsp);
3520 		info[159] = 0;
3521 		(void) fct_port_shutdown(qlt->qlt_port,
3522 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3523 
3524 		return;
3525 	}
3526 
3527 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
3528 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3529 	if (qcmd->flags & QLT_CMD_ABORTING) {
3530 		/*
3531 		 * This is the same case as "if (status == 5)" above. The
3532 		 * only difference is that in this case the firmware actually
3533 		 * finished sending the response. So the abort attempt will
3534 		 * come back with status ?. We will handle it there.
3535 		 */
3536 		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
3537 		    "abort it");
3538 		return;
3539 	}
3540 
3541 	if (qcmd->dbuf != NULL) {
3542 		qlt_dmem_free(NULL, qcmd->dbuf);
3543 		qcmd->dbuf = NULL;
3544 	}
3545 
3546 	if (status == 0) {
3547 		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
3548 	} else {
3549 		fct_send_response_done(cmd,
3550 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
3551 	}
3552 }
3553 
3554 static void
3555 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
3556 {
3557 	char		info[160];
3558 	fct_cmd_t	*cmd;
3559 	qlt_cmd_t	*qcmd;
3560 	uint32_t	hndl;
3561 	uint32_t	subcode1, subcode2;
3562 	uint16_t	status;
3563 
3564 	hndl = QMEM_RD32(qlt, rsp+4);
3565 	status = QMEM_RD16(qlt, rsp+8);
3566 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3567 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3568 
3569 	if (!CMD_HANDLE_VALID(hndl)) {
3570 		ASSERT(hndl == 0);
3571 		/*
3572 		 * Someone has requested to abort it, but no one is waiting for
3573 		 * this completion.
3574 		 */
3575 		if ((status != 0) && (status != 8)) {
3576 			/*
3577 			 * There could be exchange resource leakage, so
3578 			 * throw HBA fatal error event now
3579 			 */
3580 			(void) snprintf(info, 160,
3581 			    "qlt_handle_unsol_els_abort_completion: "
3582 			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3583 			    hndl, status, subcode1, subcode2, (void *)rsp);
3584 			info[159] = 0;
3585 			(void) fct_port_shutdown(qlt->qlt_port,
3586 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3587 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3588 			return;
3589 		}
3590 
3591 		return;
3592 	}
3593 
3594 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3595 	if (cmd == NULL) {
3596 		/*
3597 		 * Why would this happen ??
3598 		 */
3599 		(void) snprintf(info, 160,
3600 		    "qlt_handle_unsol_els_abort_completion: can not get "
3601 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3602 		    (void *)rsp);
3603 		info[159] = 0;
3604 		(void) fct_port_shutdown(qlt->qlt_port,
3605 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3606 
3607 		return;
3608 	}
3609 
3610 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
3611 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3612 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
3613 
3614 	if (qcmd->dbuf != NULL) {
3615 		qlt_dmem_free(NULL, qcmd->dbuf);
3616 		qcmd->dbuf = NULL;
3617 	}
3618 
3619 	if (status == 0) {
3620 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
3621 	} else if (status == 8) {
3622 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
3623 	} else {
3624 		fct_cmd_fca_aborted(cmd,
3625 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
3626 	}
3627 }
3628 
3629 static void
3630 qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
3631 {
3632 	char		info[160];
3633 	fct_cmd_t	*cmd;
3634 	fct_els_t	*els;
3635 	qlt_cmd_t	*qcmd;
3636 	uint32_t	hndl;
3637 	uint32_t	subcode1, subcode2;
3638 	uint16_t	status;
3639 
3640 	hndl = QMEM_RD32(qlt, rsp+4);
3641 	status = QMEM_RD16(qlt, rsp+8);
3642 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3643 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3644 
3645 	if (!CMD_HANDLE_VALID(hndl)) {
3646 		/*
3647 		 * This cannot happen for sol els completion.
3648 		 */
3649 		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
3650 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3651 		    hndl, status, subcode1, subcode2, (void *)rsp);
3652 		info[159] = 0;
3653 		(void) fct_port_shutdown(qlt->qlt_port,
3654 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3655 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3656 		return;
3657 	}
3658 
3659 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3660 	if (cmd == NULL) {
3661 		(void) snprintf(info, 160,
3662 		    "qlt_handle_sol_els_completion: can not "
3663 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3664 		    (void *)rsp);
3665 		info[159] = 0;
3666 		(void) fct_port_shutdown(qlt->qlt_port,
3667 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3668 
3669 		return;
3670 	}
3671 
3672 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
3673 	els = (fct_els_t *)cmd->cmd_specific;
3674 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3675 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));
3676 
3677 	if (qcmd->flags & QLT_CMD_ABORTING) {
3678 		/*
3679 		 * We will handle it when the ABORT IO IOCB returns.
3680 		 */
3681 		return;
3682 	}
3683 
3684 	if (qcmd->dbuf != NULL) {
3685 		if (status == 0) {
3686 			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
3687 			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
3688 			    qcmd->param.resp_offset,
3689 			    els->els_resp_payload, els->els_resp_size);
3690 		}
3691 		qlt_dmem_free(NULL, qcmd->dbuf);
3692 		qcmd->dbuf = NULL;
3693 	}
3694 
3695 	if (status == 0) {
3696 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
3697 	} else {
3698 		fct_send_cmd_done(cmd,
3699 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
3700 	}
3701 }
3702 
3703 static void
3704 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
3705 {
3706 	fct_cmd_t	*cmd;
3707 	fct_sol_ct_t	*ct;
3708 	qlt_cmd_t	*qcmd;
3709 	uint32_t	 hndl;
3710 	uint16_t	 status;
3711 	char		 info[160];
3712 
3713 	hndl = QMEM_RD32(qlt, rsp+4);
3714 	status = QMEM_RD16(qlt, rsp+8);
3715 
3716 	if (!CMD_HANDLE_VALID(hndl)) {
3717 		/*
3718 		 * Solicited commands will always have a valid handle.
3719 		 */
3720 		(void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
3721 		    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
3722 		info[159] = 0;
3723 		(void) fct_port_shutdown(qlt->qlt_port,
3724 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3725 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3726 		return;
3727 	}
3728 
3729 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3730 	if (cmd == NULL) {
3731 		(void) snprintf(info, 160,
3732 		    "qlt_handle_ct_completion: cannot find "
3733 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3734 		    (void *)rsp);
3735 		info[159] = 0;
3736 		(void) fct_port_shutdown(qlt->qlt_port,
3737 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3738 
3739 		return;
3740 	}
3741 
3742 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
3743 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3744 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
3745 
3746 	if (qcmd->flags & QLT_CMD_ABORTING) {
3747 		/*
3748 		 * We will handle it when ABORT IO IOCB returns;
3749 		 */
3750 		return;
3751 	}
3752 
3753 	ASSERT(qcmd->dbuf);
3754 	if (status == 0) {
3755 		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
3756 		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
3757 		    qcmd->param.resp_offset,
3758 		    ct->ct_resp_payload, ct->ct_resp_size);
3759 	}
3760 	qlt_dmem_free(NULL, qcmd->dbuf);
3761 	qcmd->dbuf = NULL;
3762 
3763 	if (status == 0) {
3764 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
3765 	} else {
3766 		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
3767 	}
3768 }
3769 
3770 static void
3771 qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
3772 {
3773 	fct_cmd_t	*cmd;
3774 	scsi_task_t	*task;
3775 	qlt_cmd_t	*qcmd;
3776 	stmf_data_buf_t	*dbuf;
3777 	fct_status_t	fc_st;
3778 	uint32_t	iof = 0;
3779 	uint32_t	hndl;
3780 	uint16_t	status;
3781 	uint16_t	flags;
3782 	uint8_t		abort_req;
3783 	uint8_t		n;
3784 	char		info[160];
3785 
3786 	/* XXX: Check validity of the IOCB by checking 4th byte. */
3787 	hndl = QMEM_RD32(qlt, rsp+4);
3788 	status = QMEM_RD16(qlt, rsp+8);
3789 	flags = QMEM_RD16(qlt, rsp+0x1a);
3790 	n = rsp[2];
3791 
3792 	if (!CMD_HANDLE_VALID(hndl)) {
3793 		ASSERT(hndl == 0);
3794 		/*
3795 		 * Someone has requested to abort it, but no one is waiting for
3796 		 * this completion.
3797 		 */
3798 		QLT_LOG(qlt->qlt_port_alias, "qlt_handle_ctio_completion: "
3799 		    "hndl-%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
3800 		if ((status != 1) && (status != 2)) {
3801 			/*
3802 			 * There could be exchange resource leakage, so
3803 			 * throw HBA fatal error event now
3804 			 */
3805 			(void) snprintf(info, 160,
3806 			    "qlt_handle_ctio_completion: hndl-"
3807 			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
3808 			info[159] = 0;
3809 			(void) fct_port_shutdown(qlt->qlt_port,
3810 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3811 
3812 		}
3813 
3814 		return;
3815 	}
3816 
3817 	if (flags & BIT_14) {
3818 		abort_req = 1;
3819 		QLT_EXT_LOG(qlt->qlt_port_alias, "qlt_handle_ctio_completion: "
3820 		    "abort: hndl-%x, status-%x, rsp-%p", hndl, status,
3821 		    (void *)rsp);
3822 	} else {
3823 		abort_req = 0;
3824 	}
3825 
3826 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3827 	if (cmd == NULL) {
3828 		(void) snprintf(info, 160,
3829 		    "qlt_handle_ctio_completion: cannot find "
3830 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3831 		    (void *)rsp);
3832 		info[159] = 0;
3833 		(void) fct_port_shutdown(qlt->qlt_port,
3834 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3835 
3836 		return;
3837 	}
3838 
3839 	task = (scsi_task_t *)cmd->cmd_specific;
3840 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3841 	if (qcmd->dbuf_rsp_iu) {
3842 		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
3843 		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
3844 		qcmd->dbuf_rsp_iu = NULL;
3845 	}
3846 
3847 	if ((status == 1) || (status == 2)) {
3848 		if (abort_req) {
3849 			fc_st = FCT_ABORT_SUCCESS;
3850 			iof = FCT_IOF_FCA_DONE;
3851 		} else {
3852 			fc_st = FCT_SUCCESS;
3853 			if (flags & BIT_15) {
3854 				iof = FCT_IOF_FCA_DONE;
3855 			}
3856 		}
3857 	} else {
3858 		if ((status == 8) && abort_req) {
3859 			fc_st = FCT_NOT_FOUND;
3860 			iof = FCT_IOF_FCA_DONE;
3861 		} else {
3862 			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
3863 		}
3864 	}
3865 	dbuf = NULL;
3866 	if (((n & BIT_7) == 0) && (!abort_req)) {
3867 		/* A completion of data xfer */
3868 		if (n == 0) {
3869 			dbuf = qcmd->dbuf;
3870 		} else {
3871 			dbuf = stmf_handle_to_buf(task, n);
3872 		}
3873 
3874 		ASSERT(dbuf != NULL);
3875 		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
3876 			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
3877 		if (flags & BIT_15) {
3878 			dbuf->db_flags |= DB_STATUS_GOOD_SENT;
3879 		}
3880 
3881 		dbuf->db_xfer_status = fc_st;
3882 		fct_scsi_data_xfer_done(cmd, dbuf, iof);
3883 		return;
3884 	}
3885 	if (!abort_req) {
3886 		/*
3887 		 * This was just a pure status xfer.
3888 		 */
3889 		fct_send_response_done(cmd, fc_st, iof);
3890 		return;
3891 	}
3892 
3893 	fct_cmd_fca_aborted(cmd, fc_st, iof);
3894 }
3895 
3896 static void
3897 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
3898 {
3899 	char		info[80];
3900 	fct_cmd_t	*cmd;
3901 	qlt_cmd_t	*qcmd;
3902 	uint32_t	h;
3903 	uint16_t	status;
3904 
3905 	h = QMEM_RD32(qlt, rsp+4);
3906 	status = QMEM_RD16(qlt, rsp+8);
3907 
3908 	if (!CMD_HANDLE_VALID(h)) {
3909 		/*
3910 		 * Solicited commands always have a valid handle.
3911 		 */
3912 		(void) snprintf(info, 80,
3913 		    "qlt_handle_sol_abort_completion: hndl-"
3914 		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
3915 		info[79] = 0;
3916 		(void) fct_port_shutdown(qlt->qlt_port,
3917 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3918 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3919 		return;
3920 	}
3921 	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
3922 	if (cmd == NULL) {
3923 		/*
3924 		 * What happened to the cmd ??
3925 		 */
3926 		(void) snprintf(info, 80,
3927 		    "qlt_handle_sol_abort_completion: cannot "
3928 		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
3929 		    (void *)rsp);
3930 		info[79] = 0;
3931 		(void) fct_port_shutdown(qlt->qlt_port,
3932 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3933 
3934 		return;
3935 	}
3936 
3937 	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
3938 	    (cmd->cmd_type == FCT_CMD_SOL_CT));
3939 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3940 	if (qcmd->dbuf != NULL) {
3941 		qlt_dmem_free(NULL, qcmd->dbuf);
3942 		qcmd->dbuf = NULL;
3943 	}
3944 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
3945 	if (status == 0) {
3946 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
3947 	} else if (status == 0x31) {
3948 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
3949 	} else {
3950 		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
3951 	}
3952 }
3953 
3954 static void
3955 qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
3956 {
3957 	qlt_abts_cmd_t	*qcmd;
3958 	fct_cmd_t	*cmd;
3959 	uint32_t	remote_portid;
3960 	char		info[160];
3961 
3962 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
3963 	    ((uint32_t)(resp[0x1A])) << 16;
3964 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
3965 	    sizeof (qlt_abts_cmd_t), 0);
3966 	if (cmd == NULL) {
3967 		(void) snprintf(info, 160,
3968 		    "qlt_handle_rcvd_abts: qlt-%p, can't "
3969 		    "allocate space for fct_cmd", (void *)qlt);
3970 		info[159] = 0;
3971 		(void) fct_port_shutdown(qlt->qlt_port,
3972 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3973 		return;
3974 	}
3975 
3976 	resp[0xC] = resp[0xD] = resp[0xE] = 0;
3977 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3978 	bcopy(resp, qcmd->buf, IOCB_SIZE);
3979 	cmd->cmd_port = qlt->qlt_port;
3980 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
3981 	if (cmd->cmd_rp_handle == 0xFFFF)
3982 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3983 
3984 	cmd->cmd_rportid = remote_portid;
3985 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
3986 	    ((uint32_t)(resp[0x16])) << 16;
3987 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
3988 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
3989 	fct_post_rcvd_cmd(cmd, 0);
3990 }
3991 
3992 static void
3993 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
3994 {
3995 	uint16_t status;
3996 	char	info[80];
3997 
3998 	status = QMEM_RD16(qlt, resp+8);
3999 
4000 	if ((status == 0) || (status == 5)) {
4001 		return;
4002 	}
4003 	(void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
4004 	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4005 	    ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4006 	info[79] = 0;
4007 	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4008 	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4009 }
4010 
4011 #ifdef	DEBUG
4012 uint32_t qlt_drop_abort_counter = 0;
4013 #endif
4014 
4015 fct_status_t
4016 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4017 {
4018 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4019 
4020 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4021 	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4022 		return (FCT_NOT_FOUND);
4023 	}
4024 
4025 #ifdef DEBUG
4026 	if (qlt_drop_abort_counter > 0) {
4027 		if (atomic_add_32_nv(&qlt_drop_abort_counter, -1) == 1)
4028 			return (FCT_SUCCESS);
4029 	}
4030 #endif
4031 
4032 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4033 		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4034 	}
4035 
4036 	if (flags & FCT_IOF_FORCE_FCA_DONE) {
4037 		cmd->cmd_handle = 0;
4038 	}
4039 
4040 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4041 		return (qlt_send_abts_response(qlt, cmd, 1));
4042 	}
4043 
4044 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4045 		return (qlt_abort_purex(qlt, cmd));
4046 	}
4047 
4048 	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4049 	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4050 		return (qlt_abort_sol_cmd(qlt, cmd));
4051 	}
4052 
4053 	ASSERT(0);
4054 	return (FCT_FAILURE);
4055 }
4056 
4057 fct_status_t
4058 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4059 {
4060 	uint8_t *req;
4061 	qlt_cmd_t *qcmd;
4062 
4063 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4064 	qcmd->flags |= QLT_CMD_ABORTING;
4065 	QLT_LOG(qlt->qlt_port_alias, "qlt_abort_sol_cmd: fctcmd-%p, "
4066 	    "cmd_handle-%x", cmd, cmd->cmd_handle);
4067 
4068 	mutex_enter(&qlt->req_lock);
4069 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4070 	if (req == NULL) {
4071 		mutex_exit(&qlt->req_lock);
4072 
4073 		return (FCT_BUSY);
4074 	}
4075 	bzero(req, IOCB_SIZE);
4076 	req[0] = 0x33; req[1] = 1;
4077 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4078 	if (cmd->cmd_rp) {
4079 		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4080 	} else {
4081 		QMEM_WR16(qlt, req+8, 0xFFFF);
4082 	}
4083 
4084 	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4085 	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4086 	qlt_submit_req_entries(qlt, 1);
4087 	mutex_exit(&qlt->req_lock);
4088 
4089 	return (FCT_SUCCESS);
4090 }
4091 
4092 fct_status_t
4093 qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
4094 {
4095 	uint8_t *req;
4096 	qlt_cmd_t *qcmd;
4097 	fct_els_t *els;
4098 	uint8_t elsop, req1f;
4099 
4100 	els = (fct_els_t *)cmd->cmd_specific;
4101 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4102 	elsop = els->els_req_payload[0];
4103 	QLT_LOG(qlt->qlt_port_alias,
4104 	    "qlt_abort_purex: fctcmd-%p, cmd_handle-%x, "
4105 	    "elsop-%x", cmd, cmd->cmd_handle, elsop);
4106 	req1f = 0x60;	/* Terminate xchg */
4107 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
4108 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
4109 		req1f |= BIT_4;
4110 	}
4111 
4112 	mutex_enter(&qlt->req_lock);
4113 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4114 	if (req == NULL) {
4115 		mutex_exit(&qlt->req_lock);
4116 
4117 		return (FCT_BUSY);
4118 	}
4119 
4120 	qcmd->flags |= QLT_CMD_ABORTING;
4121 	bzero(req, IOCB_SIZE);
4122 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
4123 	req[0x16] = elsop; req[0x1f] = req1f;
4124 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4125 	if (cmd->cmd_rp) {
4126 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4127 	} else {
4128 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
4129 	}
4130 
4131 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
4132 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
4133 	qlt_submit_req_entries(qlt, 1);
4134 	mutex_exit(&qlt->req_lock);
4135 
4136 	return (FCT_SUCCESS);
4137 }
4138 
4139 fct_status_t
4140 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4141 {
4142 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4143 	uint8_t *req;
4144 	uint16_t flags;
4145 
4146 	flags = BIT_14 | (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
4147 	QLT_EXT_LOG(qlt->qlt_port_alias, "qlt_abort_unsol_scsi_cmd: fctcmd-%p, "
4148 	    "cmd_handle-%x", cmd, cmd->cmd_handle);
4149 
4150 	mutex_enter(&qlt->req_lock);
4151 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4152 	if (req == NULL) {
4153 		mutex_exit(&qlt->req_lock);
4154 
4155 		return (FCT_BUSY);
4156 	}
4157 
4158 	qcmd->flags |= QLT_CMD_ABORTING;
4159 	bzero(req, IOCB_SIZE);
4160 	req[0] = 0x12; req[1] = 0x1;
4161 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4162 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4163 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
4164 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4165 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4166 	QMEM_WR16(qlt, req+0x1A, flags);
4167 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4168 	qlt_submit_req_entries(qlt, 1);
4169 	mutex_exit(&qlt->req_lock);
4170 
4171 	return (FCT_SUCCESS);
4172 }
4173 
4174 fct_status_t
4175 qlt_send_cmd(fct_cmd_t *cmd)
4176 {
4177 	qlt_state_t *qlt;
4178 
4179 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4180 	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4181 		return (qlt_send_els(qlt, cmd));
4182 	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4183 		return (qlt_send_ct(qlt, cmd));
4184 	}
4185 
4186 	ASSERT(0);
4187 	return (FCT_FAILURE);
4188 }
4189 
4190 fct_status_t
4191 qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
4192 {
4193 	uint8_t *req;
4194 	fct_els_t *els;
4195 	qlt_cmd_t *qcmd;
4196 	stmf_data_buf_t *buf;
4197 	qlt_dmem_bctl_t *bctl;
4198 	uint32_t sz, minsz;
4199 
4200 	els = (fct_els_t *)cmd->cmd_specific;
4201 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4202 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4203 	qcmd->param.resp_offset = (els->els_req_size + 7) & ~7;
4204 	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
4205 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4206 	if (buf == NULL) {
4207 		return (FCT_BUSY);
4208 	}
4209 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4210 
4211 	qcmd->dbuf = buf;
4212 	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
4213 	    els->els_req_size);
4214 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4215 
4216 	mutex_enter(&qlt->req_lock);
4217 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4218 	if (req == NULL) {
4219 		qlt_dmem_free(NULL, buf);
4220 		mutex_exit(&qlt->req_lock);
4221 		return (FCT_BUSY);
4222 	}
4223 	bzero(req, IOCB_SIZE);
4224 	req[0] = 0x53; req[1] = 1;
4225 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4226 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4227 	QMEM_WR16(qlt, (&req[0xC]), 1);
4228 	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
4229 	QMEM_WR16(qlt, (&req[0x14]), 1);
4230 	req[0x16] = els->els_req_payload[0];
4231 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
4232 		req[0x1b] = (cmd->cmd_lportid >> 16) & 0xff;
4233 		req[0x1c] = cmd->cmd_lportid & 0xff;
4234 		req[0x1d] = (cmd->cmd_lportid >> 8) & 0xff;
4235 	}
4236 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
4237 	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
4238 	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
4239 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
4240 	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
4241 	QMEM_WR64(qlt, (&req[0x34]), bctl->bctl_dev_addr +
4242 	    qcmd->param.resp_offset);
4243 	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
4244 	qlt_submit_req_entries(qlt, 1);
4245 	mutex_exit(&qlt->req_lock);
4246 
4247 	return (FCT_SUCCESS);
4248 }
4249 
4250 fct_status_t
4251 qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
4252 {
4253 	uint8_t *req;
4254 	fct_sol_ct_t *ct;
4255 	qlt_cmd_t *qcmd;
4256 	stmf_data_buf_t *buf;
4257 	qlt_dmem_bctl_t *bctl;
4258 	uint32_t sz, minsz;
4259 
4260 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4261 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4262 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4263 	qcmd->param.resp_offset = (ct->ct_req_size + 7) & ~7;
4264 	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
4265 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4266 	if (buf == NULL) {
4267 		return (FCT_BUSY);
4268 	}
4269 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4270 
4271 	qcmd->dbuf = buf;
4272 	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
4273 	    ct->ct_req_size);
4274 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4275 
4276 	mutex_enter(&qlt->req_lock);
4277 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4278 	if (req == NULL) {
4279 		qlt_dmem_free(NULL, buf);
4280 		mutex_exit(&qlt->req_lock);
4281 		return (FCT_BUSY);
4282 	}
4283 	bzero(req, IOCB_SIZE);
4284 	req[0] = 0x29; req[1] = 1;
4285 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4286 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4287 	QMEM_WR16(qlt, (&req[0xC]), 1);
4288 	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
4289 	QMEM_WR16(qlt, (&req[0x14]), 1);
4290 
4291 	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
4292 	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);
4293 
4294 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
4295 	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
4296 	QMEM_WR64(qlt, (&req[0x34]), bctl->bctl_dev_addr +
4297 	    qcmd->param.resp_offset);		/* RESPONSE DSD */
4298 	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);
4299 
4300 	qlt_submit_req_entries(qlt, 1);
4301 	mutex_exit(&qlt->req_lock);
4302 
4303 	return (FCT_SUCCESS);
4304 }
4305 
4306 
4307 /*
4308  * All QLT_FIRMWARE_* will mainly be handled in this function
4309  * It can not be called in interrupt context
4310  *
4311  * FWDUMP's purpose is to serve ioctl, so we will use qlt_ioctl_flags
4312  * and qlt_ioctl_lock
4313  */
4314 static fct_status_t
4315 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4316 {
4317 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
4318 	int		i;
4319 	int		retries;
4320 	int		n, size_left;
4321 	char		c = ' ';
4322 	uint32_t	addr, endaddr, words_to_read;
4323 	caddr_t		buf;
4324 
4325 	mutex_enter(&qlt->qlt_ioctl_lock);
4326 	/*
4327 	 * To make sure that there's no outstanding dumping task
4328 	 */
4329 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4330 		mutex_exit(&qlt->qlt_ioctl_lock);
4331 		QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: outstanding");
4332 		return (FCT_FAILURE);
4333 	}
4334 
4335 	/*
4336 	 * To make sure not to overwrite existing dump
4337 	 */
4338 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4339 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4340 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4341 		/*
4342 		 * If we have alreay one dump, but it's not triggered by user
4343 		 * and the user hasn't fetched it, we shouldn't dump again.
4344 		 */
4345 		mutex_exit(&qlt->qlt_ioctl_lock);
4346 		QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: There's one "
4347 		    "dump, please fetech it");
4348 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
4349 		    "is one already outstanding.", qlt->instance);
4350 		return (FCT_FAILURE);
4351 	}
4352 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
4353 	if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
4354 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
4355 	} else {
4356 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
4357 	}
4358 	mutex_exit(&qlt->qlt_ioctl_lock);
4359 
4360 	size_left = QLT_FWDUMP_BUFSIZE;
4361 	if (!qlt->qlt_fwdump_buf) {
4362 		ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
4363 		/*
4364 		 * It's the only place that we allocate buf for dumping. After
4365 		 * it's allocated, we will use it until the port is detached.
4366 		 */
4367 		qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
4368 	}
4369 
4370 	/*
4371 	 * Start to dump firmware
4372 	 */
4373 	buf = (caddr_t)qlt->qlt_fwdump_buf;
4374 
4375 	/*
4376 	 * Print the ISP firmware revision number and attributes information
4377 	 * Read the RISC to Host Status register
4378 	 */
4379 	n = snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
4380 	    "Attributes %04x\n\nR2H Status Register\n%08x",
4381 	    qlt->fw_major, qlt->fw_minor,
4382 	    qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, 0x44));
4383 	buf += n; size_left -= n;
4384 
4385 	/*
4386 	 * Before pausing the RISC, make sure no mailbox can execute
4387 	 */
4388 	mutex_enter(&qlt->mbox_lock);
4389 	if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
4390 		/*
4391 		 * Wait to grab the mailboxes
4392 		 */
4393 		for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
4394 		    (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
4395 			(void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
4396 			    ddi_get_lbolt() + drv_usectohz(1000000));
4397 			if (retries > 5) {
4398 				mutex_exit(&qlt->mbox_lock);
4399 				QLT_LOG(qlt->qlt_port_alias,
4400 				    "qlt_firmware_dump: "
4401 				    "can't drain out mailbox commands");
4402 				goto dump_fail;
4403 			}
4404 		}
4405 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
4406 		cv_broadcast(&qlt->mbox_cv);
4407 	}
4408 	mutex_exit(&qlt->mbox_lock);
4409 
4410 	/*
4411 	 * Pause the RISC processor
4412 	 */
4413 	REG_WR32(qlt, REG_HCCR, 0x30000000);
4414 
4415 	/*
4416 	 * Wait for the RISC processor to pause
4417 	 */
4418 	for (i = 0; i < 200; i++) {
4419 		if (REG_RD32(qlt, 0x44) & 0x100) {
4420 			break;
4421 		}
4422 		drv_usecwait(1000);
4423 	}
4424 	if (i == 200) {
4425 		QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: can't pause");
4426 		return (FCT_FAILURE);
4427 	}
4428 
4429 	if (!qlt->qlt_25xx_chip) {
4430 		goto over_25xx_specific_dump;
4431 	}
4432 	n = snprintf(buf, size_left, "\n\nHostRisc registers\n");
4433 	buf += n; size_left -= n;
4434 	REG_WR32(qlt, 0x54, 0x7000);
4435 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4436 	buf += n; size_left -= n;
4437 	REG_WR32(qlt, 0x54, 0x7010);
4438 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4439 	buf += n; size_left -= n;
4440 	REG_WR32(qlt, 0x54, 0x7C00);
4441 
4442 	n = snprintf(buf, size_left, "\nPCIe registers\n");
4443 	buf += n; size_left -= n;
4444 	REG_WR32(qlt, 0xC0, 0x1);
4445 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
4446 	buf += n; size_left -= n;
4447 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
4448 	buf += n; size_left -= n;
4449 	REG_WR32(qlt, 0xC0, 0x0);
4450 
4451 over_25xx_specific_dump:;
4452 	n = snprintf(buf, size_left, "\n\nHost Interface Registers\n");
4453 	buf += n; size_left -= n;
4454 	/*
4455 	 * Capture data from 32 regsiters
4456 	 */
4457 	n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
4458 	buf += n; size_left -= n;
4459 
4460 	/*
4461 	 * Disable interrupts
4462 	 */
4463 	REG_WR32(qlt, 0xc, 0);
4464 
4465 	/*
4466 	 * Shadow registers
4467 	 */
4468 	n = snprintf(buf, size_left, "\nShadow Registers\n");
4469 	buf += n; size_left -= n;
4470 
4471 	REG_WR32(qlt, 0x54, 0xF70);
4472 	addr = 0xb0000000;
4473 	for (i = 0; i < 0xb; i++) {
4474 		if ((!qlt->qlt_25xx_chip) && (i >= 7)) {
4475 			break;
4476 		}
4477 		if (i && ((i & 7) == 0)) {
4478 			n = snprintf(buf, size_left, "\n");
4479 			buf += n; size_left -= n;
4480 		}
4481 		REG_WR32(qlt, 0xF0, addr);
4482 		n = snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
4483 		buf += n; size_left -= n;
4484 		addr += 0x100000;
4485 	}
4486 
4487 	if (qlt->qlt_25xx_chip) {
4488 		REG_WR32(qlt, 0x54, 0x10);
4489 		n = snprintf(buf, size_left, "\n\nRISC IO Register\n%08x",
4490 		    REG_RD32(qlt, 0xC0));
4491 		buf += n; size_left -= n;
4492 	}
4493 
4494 	/*
4495 	 * Mailbox registers
4496 	 */
4497 	n = snprintf(buf, size_left, "\n\nMailbox Registers\n");
4498 	buf += n; size_left -= n;
4499 	for (i = 0; i < 32; i += 2) {
4500 		if ((i + 2) & 15) {
4501 			c = ' ';
4502 		} else {
4503 			c = '\n';
4504 		}
4505 		n = snprintf(buf, size_left, "%04x %04x%c",
4506 		    REG_RD16(qlt, 0x80 + (i << 1)),
4507 		    REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
4508 		buf += n; size_left -= n;
4509 	}
4510 
4511 	/*
4512 	 * Transfer sequence registers
4513 	 */
4514 	n = snprintf(buf, size_left, "\nXSEQ GP Registers\n");
4515 	buf += n; size_left -= n;
4516 
4517 	REG_WR32(qlt, 0x54, 0xBF00);
4518 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4519 	buf += n; size_left -= n;
4520 	REG_WR32(qlt, 0x54, 0xBF10);
4521 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4522 	buf += n; size_left -= n;
4523 	REG_WR32(qlt, 0x54, 0xBF20);
4524 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4525 	buf += n; size_left -= n;
4526 	REG_WR32(qlt, 0x54, 0xBF30);
4527 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4528 	buf += n; size_left -= n;
4529 	REG_WR32(qlt, 0x54, 0xBF40);
4530 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4531 	buf += n; size_left -= n;
4532 	REG_WR32(qlt, 0x54, 0xBF50);
4533 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4534 	buf += n; size_left -= n;
4535 	REG_WR32(qlt, 0x54, 0xBF60);
4536 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4537 	buf += n; size_left -= n;
4538 	REG_WR32(qlt, 0x54, 0xBF70);
4539 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4540 	buf += n; size_left -= n;
4541 	n = snprintf(buf, size_left, "\nXSEQ-0 registers\n");
4542 	buf += n; size_left -= n;
4543 	REG_WR32(qlt, 0x54, 0xBFE0);
4544 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4545 	buf += n; size_left -= n;
4546 	n = snprintf(buf, size_left, "\nXSEQ-1 registers\n");
4547 	buf += n; size_left -= n;
4548 	REG_WR32(qlt, 0x54, 0xBFF0);
4549 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4550 	buf += n; size_left -= n;
4551 
4552 	/*
4553 	 * Receive sequence registers
4554 	 */
4555 	n = snprintf(buf, size_left, "\nRSEQ GP Registers\n");
4556 	buf += n; size_left -= n;
4557 	REG_WR32(qlt, 0x54, 0xFF00);
4558 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4559 	buf += n; size_left -= n;
4560 	REG_WR32(qlt, 0x54, 0xFF10);
4561 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4562 	buf += n; size_left -= n;
4563 	REG_WR32(qlt, 0x54, 0xFF20);
4564 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4565 	buf += n; size_left -= n;
4566 	REG_WR32(qlt, 0x54, 0xFF30);
4567 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4568 	buf += n; size_left -= n;
4569 	REG_WR32(qlt, 0x54, 0xFF40);
4570 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4571 	buf += n; size_left -= n;
4572 	REG_WR32(qlt, 0x54, 0xFF50);
4573 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4574 	buf += n; size_left -= n;
4575 	REG_WR32(qlt, 0x54, 0xFF60);
4576 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4577 	buf += n; size_left -= n;
4578 	REG_WR32(qlt, 0x54, 0xFF70);
4579 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4580 	buf += n; size_left -= n;
4581 	n = snprintf(buf, size_left, "\nRSEQ-0 registers\n");
4582 	buf += n; size_left -= n;
4583 	REG_WR32(qlt, 0x54, 0xFFD0);
4584 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4585 	buf += n; size_left -= n;
4586 	n = snprintf(buf, size_left, "\nRSEQ-1 registers\n");
4587 	buf += n; size_left -= n;
4588 	REG_WR32(qlt, 0x54, 0xFFE0);
4589 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4590 	buf += n; size_left -= n;
4591 	n = snprintf(buf, size_left, "\nRSEQ-2 registers\n");
4592 	buf += n; size_left -= n;
4593 	REG_WR32(qlt, 0x54, 0xFFF0);
4594 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4595 	buf += n; size_left -= n;
4596 
4597 	if (!qlt->qlt_25xx_chip)
4598 		goto over_aseq_regs;
4599 
4600 	/*
4601 	 * Auxiliary sequencer registers
4602 	 */
4603 	n = snprintf(buf, size_left, "\nASEQ GP Registers\n");
4604 	buf += n; size_left -= n;
4605 	REG_WR32(qlt, 0x54, 0xB000);
4606 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4607 	buf += n; size_left -= n;
4608 	REG_WR32(qlt, 0x54, 0xB010);
4609 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4610 	buf += n; size_left -= n;
4611 	REG_WR32(qlt, 0x54, 0xB020);
4612 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4613 	buf += n; size_left -= n;
4614 	REG_WR32(qlt, 0x54, 0xB030);
4615 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4616 	buf += n; size_left -= n;
4617 	REG_WR32(qlt, 0x54, 0xB040);
4618 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4619 	buf += n; size_left -= n;
4620 	REG_WR32(qlt, 0x54, 0xB050);
4621 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4622 	buf += n; size_left -= n;
4623 	REG_WR32(qlt, 0x54, 0xB060);
4624 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4625 	buf += n; size_left -= n;
4626 	REG_WR32(qlt, 0x54, 0xB070);
4627 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4628 	buf += n; size_left -= n;
4629 	n = snprintf(buf, size_left, "\nASEQ-0 registers\n");
4630 	buf += n; size_left -= n;
4631 	REG_WR32(qlt, 0x54, 0xB0C0);
4632 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4633 	buf += n; size_left -= n;
4634 	REG_WR32(qlt, 0x54, 0xB0D0);
4635 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4636 	buf += n; size_left -= n;
4637 	n = snprintf(buf, size_left, "\nASEQ-1 registers\n");
4638 	buf += n; size_left -= n;
4639 	REG_WR32(qlt, 0x54, 0xB0E0);
4640 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4641 	buf += n; size_left -= n;
4642 	n = snprintf(buf, size_left, "\nASEQ-2 registers\n");
4643 	buf += n; size_left -= n;
4644 	REG_WR32(qlt, 0x54, 0xB0F0);
4645 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4646 	buf += n; size_left -= n;
4647 
4648 over_aseq_regs:;
4649 
4650 	/*
4651 	 * Command DMA registers
4652 	 */
4653 	n = snprintf(buf, size_left, "\nCommand DMA registers\n");
4654 	buf += n; size_left -= n;
4655 	REG_WR32(qlt, 0x54, 0x7100);
4656 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4657 	buf += n; size_left -= n;
4658 
4659 	/*
4660 	 * Queues
4661 	 */
4662 	n = snprintf(buf, size_left,
4663 	    "\nRequest0 Queue DMA Channel registers\n");
4664 	buf += n; size_left -= n;
4665 	REG_WR32(qlt, 0x54, 0x7200);
4666 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
4667 	buf += n; size_left -= n;
4668 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
4669 	buf += n; size_left -= n;
4670 
4671 	n = snprintf(buf, size_left,
4672 	    "\n\nResponse0 Queue DMA Channel registers\n");
4673 	buf += n; size_left -= n;
4674 	REG_WR32(qlt, 0x54, 0x7300);
4675 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
4676 	buf += n; size_left -= n;
4677 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
4678 	buf += n; size_left -= n;
4679 
4680 	n = snprintf(buf, size_left,
4681 	    "\n\nRequest1 Queue DMA Channel registers\n");
4682 	buf += n; size_left -= n;
4683 	REG_WR32(qlt, 0x54, 0x7400);
4684 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
4685 	buf += n; size_left -= n;
4686 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
4687 	buf += n; size_left -= n;
4688 
4689 	/*
4690 	 * Transmit DMA registers
4691 	 */
4692 	n = snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
4693 	buf += n; size_left -= n;
4694 	REG_WR32(qlt, 0x54, 0x7600);
4695 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4696 	buf += n; size_left -= n;
4697 	REG_WR32(qlt, 0x54, 0x7610);
4698 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4699 	buf += n; size_left -= n;
4700 	n = snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
4701 	buf += n; size_left -= n;
4702 	REG_WR32(qlt, 0x54, 0x7620);
4703 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4704 	buf += n; size_left -= n;
4705 	REG_WR32(qlt, 0x54, 0x7630);
4706 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4707 	buf += n; size_left -= n;
4708 	n = snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
4709 	buf += n; size_left -= n;
4710 	REG_WR32(qlt, 0x54, 0x7640);
4711 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4712 	buf += n; size_left -= n;
4713 	REG_WR32(qlt, 0x54, 0x7650);
4714 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4715 	buf += n; size_left -= n;
4716 	n = snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
4717 	buf += n; size_left -= n;
4718 	REG_WR32(qlt, 0x54, 0x7660);
4719 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4720 	buf += n; size_left -= n;
4721 	REG_WR32(qlt, 0x54, 0x7670);
4722 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4723 	buf += n; size_left -= n;
4724 	n = snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
4725 	buf += n; size_left -= n;
4726 	REG_WR32(qlt, 0x54, 0x7680);
4727 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4728 	buf += n; size_left -= n;
4729 	REG_WR32(qlt, 0x54, 0x7690);
4730 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4731 	buf += n; size_left -= n;
4732 	n = snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
4733 	buf += n; size_left -= n;
4734 	REG_WR32(qlt, 0x54, 0x76A0);
4735 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4736 	buf += n; size_left -= n;
4737 
4738 	/*
4739 	 * Receive DMA registers
4740 	 */
4741 	n = snprintf(buf, size_left, "\nRCV Thread 0 Data DMA registers\n");
4742 	buf += n; size_left -= n;
4743 	REG_WR32(qlt, 0x54, 0x7700);
4744 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4745 	buf += n; size_left -= n;
4746 	REG_WR32(qlt, 0x54, 0x7710);
4747 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4748 	buf += n; size_left -= n;
4749 	n = snprintf(buf, size_left, "\nRCV Thread 1 Data DMA registers\n");
4750 	buf += n; size_left -= n;
4751 	REG_WR32(qlt, 0x54, 0x7720);
4752 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4753 	buf += n; size_left -= n;
4754 	REG_WR32(qlt, 0x54, 0x7730);
4755 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4756 	buf += n; size_left -= n;
4757 
4758 	/*
4759 	 * RISC registers
4760 	 */
4761 	n = snprintf(buf, size_left, "\nRISC GP registers\n");
4762 	buf += n; size_left -= n;
4763 	REG_WR32(qlt, 0x54, 0x0F00);
4764 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4765 	buf += n; size_left -= n;
4766 	REG_WR32(qlt, 0x54, 0x0F10);
4767 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4768 	buf += n; size_left -= n;
4769 	REG_WR32(qlt, 0x54, 0x0F20);
4770 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4771 	buf += n; size_left -= n;
4772 	REG_WR32(qlt, 0x54, 0x0F30);
4773 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4774 	buf += n; size_left -= n;
4775 	REG_WR32(qlt, 0x54, 0x0F40);
4776 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4777 	buf += n; size_left -= n;
4778 	REG_WR32(qlt, 0x54, 0x0F50);
4779 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4780 	buf += n; size_left -= n;
4781 	REG_WR32(qlt, 0x54, 0x0F60);
4782 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4783 	buf += n; size_left -= n;
4784 	REG_WR32(qlt, 0x54, 0x0F70);
4785 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4786 	buf += n; size_left -= n;
4787 
4788 	/*
4789 	 * Local memory controller registers
4790 	 */
4791 	n = snprintf(buf, size_left, "\nLMC registers\n");
4792 	buf += n; size_left -= n;
4793 	REG_WR32(qlt, 0x54, 0x3000);
4794 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4795 	buf += n; size_left -= n;
4796 	REG_WR32(qlt, 0x54, 0x3010);
4797 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4798 	buf += n; size_left -= n;
4799 	REG_WR32(qlt, 0x54, 0x3020);
4800 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4801 	buf += n; size_left -= n;
4802 	REG_WR32(qlt, 0x54, 0x3030);
4803 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4804 	buf += n; size_left -= n;
4805 	REG_WR32(qlt, 0x54, 0x3040);
4806 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4807 	buf += n; size_left -= n;
4808 	REG_WR32(qlt, 0x54, 0x3050);
4809 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4810 	buf += n; size_left -= n;
4811 	REG_WR32(qlt, 0x54, 0x3060);
4812 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4813 	buf += n; size_left -= n;
4814 
4815 	if (qlt->qlt_25xx_chip) {
4816 		REG_WR32(qlt, 0x54, 0x3070);
4817 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4818 		buf += n; size_left -= n;
4819 	}
4820 
4821 	/*
4822 	 * Fibre protocol module regsiters
4823 	 */
4824 	n = snprintf(buf, size_left, "\nFPM hardware registers\n");
4825 	buf += n; size_left -= n;
4826 	REG_WR32(qlt, 0x54, 0x4000);
4827 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4828 	buf += n; size_left -= n;
4829 	REG_WR32(qlt, 0x54, 0x4010);
4830 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4831 	buf += n; size_left -= n;
4832 	REG_WR32(qlt, 0x54, 0x4020);
4833 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4834 	buf += n; size_left -= n;
4835 	REG_WR32(qlt, 0x54, 0x4030);
4836 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4837 	buf += n; size_left -= n;
4838 	REG_WR32(qlt, 0x54, 0x4040);
4839 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4840 	buf += n; size_left -= n;
4841 	REG_WR32(qlt, 0x54, 0x4050);
4842 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4843 	buf += n; size_left -= n;
4844 	REG_WR32(qlt, 0x54, 0x4060);
4845 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4846 	buf += n; size_left -= n;
4847 	REG_WR32(qlt, 0x54, 0x4070);
4848 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4849 	buf += n; size_left -= n;
4850 	REG_WR32(qlt, 0x54, 0x4080);
4851 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4852 	buf += n; size_left -= n;
4853 	REG_WR32(qlt, 0x54, 0x4090);
4854 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4855 	buf += n; size_left -= n;
4856 	REG_WR32(qlt, 0x54, 0x40A0);
4857 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4858 	buf += n; size_left -= n;
4859 	REG_WR32(qlt, 0x54, 0x40B0);
4860 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4861 	buf += n; size_left -= n;
4862 
4863 	/*
4864 	 * Fibre buffer registers
4865 	 */
4866 	n = snprintf(buf, size_left, "\nFB hardware registers\n");
4867 	buf += n; size_left -= n;
4868 	REG_WR32(qlt, 0x54, 0x6000);
4869 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4870 	buf += n; size_left -= n;
4871 	REG_WR32(qlt, 0x54, 0x6010);
4872 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4873 	buf += n; size_left -= n;
4874 	REG_WR32(qlt, 0x54, 0x6020);
4875 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4876 	buf += n; size_left -= n;
4877 	REG_WR32(qlt, 0x54, 0x6030);
4878 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4879 	buf += n; size_left -= n;
4880 	REG_WR32(qlt, 0x54, 0x6040);
4881 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4882 	buf += n; size_left -= n;
4883 	REG_WR32(qlt, 0x54, 0x6100);
4884 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4885 	buf += n; size_left -= n;
4886 	REG_WR32(qlt, 0x54, 0x6130);
4887 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4888 	buf += n; size_left -= n;
4889 	REG_WR32(qlt, 0x54, 0x6150);
4890 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4891 	buf += n; size_left -= n;
4892 	REG_WR32(qlt, 0x54, 0x6170);
4893 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4894 	buf += n; size_left -= n;
4895 	REG_WR32(qlt, 0x54, 0x6190);
4896 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4897 	buf += n; size_left -= n;
4898 	REG_WR32(qlt, 0x54, 0x61B0);
4899 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4900 	buf += n; size_left -= n;
4901 
4902 	if (qlt->qlt_25xx_chip) {
4903 		REG_WR32(qlt, 0x54, 0x6F00);
4904 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4905 		buf += n; size_left -= n;
4906 	}
4907 
4908 	qlt->intr_sneak_counter = 10;
4909 	qlt_disable_intr(qlt);
4910 	mutex_enter(&qlt->intr_lock);
4911 	qlt->qlt_intr_enabled = 0;
4912 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
4913 	drv_usecwait(20);
4914 	qlt->intr_sneak_counter = 0;
4915 	mutex_exit(&qlt->intr_lock);
4916 
4917 	/*
4918 	 * Memory
4919 	 */
4920 	n = snprintf(buf, size_left, "\nCode RAM\n");
4921 	buf += n; size_left -= n;
4922 
4923 	addr = 0x20000;
4924 	endaddr = 0x22000;
4925 	words_to_read = 0;
4926 	while (addr < endaddr) {
4927 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
4928 		if ((words_to_read + addr) > endaddr) {
4929 			words_to_read = endaddr - addr;
4930 		}
4931 		if (qlt_read_risc_ram(qlt, addr, words_to_read) !=
4932 		    QLT_SUCCESS) {
4933 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: Error "
4934 			    "reading risc ram - CODE RAM");
4935 			goto dump_fail;
4936 		}
4937 
4938 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
4939 		buf += n; size_left -= n;
4940 
4941 		if (size_left < 100000) {
4942 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: run "
4943 			    "out of space - CODE RAM");
4944 			goto dump_ok;
4945 		}
4946 		addr += words_to_read;
4947 	}
4948 
4949 	n = snprintf(buf, size_left, "\nExternal Memory\n");
4950 	buf += n; size_left -= n;
4951 
4952 	addr = 0x100000;
4953 	endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
4954 	endaddr++;
4955 	if (endaddr & 7) {
4956 		endaddr = (endaddr + 7) & 0xFFFFFFF8;
4957 	}
4958 
4959 	words_to_read = 0;
4960 	while (addr < endaddr) {
4961 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
4962 		if ((words_to_read + addr) > endaddr) {
4963 			words_to_read = endaddr - addr;
4964 		}
4965 		if (qlt_read_risc_ram(qlt, addr, words_to_read) !=
4966 		    QLT_SUCCESS) {
4967 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: Error "
4968 			    "reading risc ram - EXT RAM");
4969 			goto dump_fail;
4970 		}
4971 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
4972 		buf += n; size_left -= n;
4973 		if (size_left < 100000) {
4974 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: run "
4975 			    "out of space - EXT RAM");
4976 			goto dump_ok;
4977 		}
4978 		addr += words_to_read;
4979 	}
4980 
4981 	/*
4982 	 * Label the end tag
4983 	 */
4984 	n = snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
4985 	buf += n; size_left -= n;
4986 
4987 	/*
4988 	 * Queue dumping
4989 	 */
4990 	n = snprintf(buf, size_left, "\nRequest Queue\n");
4991 	buf += n; size_left -= n;
4992 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
4993 	    REQUEST_QUEUE_ENTRIES, buf, size_left);
4994 	buf += n; size_left -= n;
4995 
4996 	n = snprintf(buf, size_left, "\nPriority Queue\n");
4997 	buf += n; size_left -= n;
4998 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
4999 	    PRIORITY_QUEUE_ENTRIES, buf, size_left);
5000 	buf += n; size_left -= n;
5001 
5002 	n = snprintf(buf, size_left, "\nResponse Queue\n");
5003 	buf += n; size_left -= n;
5004 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5005 	    RESPONSE_QUEUE_ENTRIES, buf, size_left);
5006 	buf += n; size_left -= n;
5007 
5008 	n = snprintf(buf, size_left, "\nATIO queue\n");
5009 	buf += n; size_left -= n;
5010 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5011 	    ATIO_QUEUE_ENTRIES, buf, size_left);
5012 	buf += n; size_left -= n;
5013 
5014 	/*
5015 	 * Lable dump reason
5016 	 */
5017 	n = snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5018 	    qlt->qlt_port_alias, ssci->st_additional_info);
5019 	buf += n; size_left -= n;
5020 
5021 dump_ok:
5022 	QLT_LOG(qlt->qlt_port_alias, "qlt_fireware_dump: left-%d", size_left);
5023 
5024 	mutex_enter(&qlt->qlt_ioctl_lock);
5025 	qlt->qlt_ioctl_flags &=
5026 	    ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5027 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5028 	mutex_exit(&qlt->qlt_ioctl_lock);
5029 	return (FCT_SUCCESS);
5030 
5031 dump_fail:
5032 	mutex_enter(&qlt->qlt_ioctl_lock);
5033 	qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5034 	mutex_exit(&qlt->qlt_ioctl_lock);
5035 	return (FCT_FAILURE);
5036 }
5037 
5038 static int
5039 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5040     int size_left)
5041 {
5042 	int		i;
5043 	int		n;
5044 	char		c = ' ';
5045 
5046 	for (i = 0, n = 0; i < count; i++) {
5047 		if ((i + 1) & 7) {
5048 			c = ' ';
5049 		} else {
5050 			c = '\n';
5051 		}
5052 		n += snprintf(&buf[n], (size_left - n), "%08x%c",
5053 		    REG_RD32(qlt, startaddr + (i << 2)), c);
5054 	}
5055 	return (n);
5056 }
5057 
5058 static int
5059 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5060     caddr_t buf, int size_left)
5061 {
5062 	int		i;
5063 	int		n;
5064 	char		c = ' ';
5065 	uint32_t	*ptr;
5066 
5067 	ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5068 	for (i = 0, n = 0; i < words; i++) {
5069 		if ((i & 7) == 0) {
5070 			n += snprintf(&buf[n], (size_left - n), "%08x: ",
5071 			    addr + i);
5072 		}
5073 		if ((i + 1) & 7) {
5074 			c = ' ';
5075 		} else {
5076 			c = '\n';
5077 		}
5078 		n += snprintf(&buf[n], (size_left - n), "%08x%c", ptr[i], c);
5079 	}
5080 	return (n);
5081 }
5082 
5083 static int
5084 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5085     int size_left)
5086 {
5087 	int		i;
5088 	int		n;
5089 	char		c = ' ';
5090 	int		words;
5091 	uint16_t	*ptr;
5092 	uint16_t	w;
5093 
5094 	words = entries * 32;
5095 	ptr = (uint16_t *)qadr;
5096 	for (i = 0, n = 0; i < words; i++) {
5097 		if ((i & 7) == 0) {
5098 			n += snprintf(&buf[n], (size_left - n), "%05x: ", i);
5099 		}
5100 		if ((i + 1) & 7) {
5101 			c = ' ';
5102 		} else {
5103 			c = '\n';
5104 		}
5105 		w = QMEM_RD16(qlt, &ptr[i]);
5106 		n += snprintf(&buf[n], (size_left - n), "%04x%c", w, c);
5107 	}
5108 	return (n);
5109 }
5110 
5111 /*
5112  * Only called by debug dump. Interrupts are disabled and mailboxes alongwith
5113  * mailbox ram is available.
5114  * Copy data from RISC RAM to system memory
5115  */
5116 static fct_status_t
5117 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
5118 {
5119 	uint64_t	da;
5120 	fct_status_t	ret;
5121 
5122 	REG_WR16(qlt, REG_MBOX(0), 0xc);
5123 	da = qlt->queue_mem_cookie.dmac_laddress;
5124 	da += MBOX_DMA_MEM_OFFSET;
5125 
5126 	/*
5127 	 * System destination address
5128 	 */
5129 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
5130 	da >>= 16;
5131 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
5132 	da >>= 16;
5133 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
5134 	da >>= 16;
5135 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
5136 
5137 	/*
5138 	 * Length
5139 	 */
5140 	REG_WR16(qlt, REG_MBOX(5), words & 0xffff);
5141 	REG_WR16(qlt, REG_MBOX(4), ((words >> 16) & 0xffff));
5142 
5143 	/*
5144 	 * RISC source address
5145 	 */
5146 	REG_WR16(qlt, REG_MBOX(1), addr & 0xffff);
5147 	REG_WR16(qlt, REG_MBOX(8), ((addr >> 16) & 0xffff));
5148 
5149 	ret = qlt_raw_mailbox_command(qlt);
5150 	REG_WR32(qlt, REG_HCCR, 0xA0000000);
5151 	if (ret == QLT_SUCCESS) {
5152 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5153 		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
5154 	} else {
5155 		QLT_LOG(qlt->qlt_port_alias, "qlt_read_risc_ram: qlt raw_mbox "
5156 		    "failed 0x%llX", ret);
5157 	}
5158 	return (ret);
5159 }
5160