xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_xioctl.c (revision 257873cfc1dd3337766407f80397db60a56f2f5a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2008 QLogic Corporation */
23 
24 /*
25  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2008 QLogic Corporation; ql_xioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2008 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
61 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
62     boolean_t (*)(EXT_IOCTL *));
63 static boolean_t ql_validate_signature(EXT_IOCTL *);
64 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
65 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
90 
91 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
92 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
94 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
95     uint8_t);
96 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
97 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
99 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
100 static int ql_load_fcode(ql_adapter_state_t *, uint8_t *, uint32_t);
101 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t, int);
102 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
103     uint8_t);
104 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
105 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
106 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
107 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
109 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
110 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
113 static uint32_t ql_setup_led(ql_adapter_state_t *);
114 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
115 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
116 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
117 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
119 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
120 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
121 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
122 static void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
123 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
124 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
125 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
126 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
127 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
128 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
129 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
130 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
131 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
132 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
134 static void ql_restart_hba(ql_adapter_state_t *);
135 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
138 
139 /* ******************************************************************** */
140 /*			External IOCTL support.				*/
141 /* ******************************************************************** */
142 
143 /*
144  * ql_alloc_xioctl_resource
145  *	Allocates resources needed by module code.
146  *
147  * Input:
148  *	ha:		adapter state pointer.
149  *
150  * Returns:
151  *	SYS_ERRNO
152  *
153  * Context:
154  *	Kernel context.
155  */
156 int
157 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
158 {
159 	ql_xioctl_t	*xp;
160 
161 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
162 
163 	if (ha->xioctl != NULL) {
164 		QL_PRINT_9(CE_CONT, "(%d): already allocated exiting\n",
165 		    ha->instance);
166 		return (0);
167 	}
168 
169 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
170 	if (xp == NULL) {
171 		EL(ha, "failed, kmem_zalloc\n");
172 		return (ENOMEM);
173 	}
174 	ha->xioctl = xp;
175 
176 	/* Allocate AEN tracking buffer */
177 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
178 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
179 	if (xp->aen_tracking_queue == NULL) {
180 		EL(ha, "failed, kmem_zalloc-2\n");
181 		ql_free_xioctl_resource(ha);
182 		return (ENOMEM);
183 	}
184 
185 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
186 
187 	return (0);
188 }
189 
190 /*
191  * ql_free_xioctl_resource
192  *	Frees resources used by module code.
193  *
194  * Input:
195  *	ha:		adapter state pointer.
196  *
197  * Context:
198  *	Kernel context.
199  */
200 void
201 ql_free_xioctl_resource(ql_adapter_state_t *ha)
202 {
203 	ql_xioctl_t	*xp = ha->xioctl;
204 
205 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
206 
207 	if (xp == NULL) {
208 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
209 		return;
210 	}
211 
212 	if (xp->aen_tracking_queue != NULL) {
213 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
214 		    sizeof (EXT_ASYNC_EVENT));
215 		xp->aen_tracking_queue = NULL;
216 	}
217 
218 	kmem_free(xp, sizeof (ql_xioctl_t));
219 	ha->xioctl = NULL;
220 
221 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
222 }
223 
224 /*
225  * ql_xioctl
226  *	External IOCTL processing.
227  *
228  * Input:
229  *	ha:	adapter state pointer.
230  *	cmd:	function to perform
231  *	arg:	data type varies with request
232  *	mode:	flags
233  *	cred_p:	credentials pointer
234  *	rval_p:	pointer to result value
235  *
236  * Returns:
237  *	0:		success
238  *	ENXIO:		No such device or address
239  *	ENOPROTOOPT:	Protocol not available
240  *
241  * Context:
242  *	Kernel context.
243  */
244 /* ARGSUSED */
245 int
246 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
247     cred_t *cred_p, int *rval_p)
248 {
249 	int	rval;
250 
251 	QL_PRINT_9(CE_CONT, "(%d): entered, cmd=%d\n", ha->instance, cmd);
252 
253 	if (ha->xioctl == NULL) {
254 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
255 		return (ENXIO);
256 	}
257 
258 	switch (cmd) {
259 	case EXT_CC_QUERY:
260 	case EXT_CC_SEND_FCCT_PASSTHRU:
261 	case EXT_CC_REG_AEN:
262 	case EXT_CC_GET_AEN:
263 	case EXT_CC_SEND_SCSI_PASSTHRU:
264 	case EXT_CC_WWPN_TO_SCSIADDR:
265 	case EXT_CC_SEND_ELS_RNID:
266 	case EXT_CC_SET_DATA:
267 	case EXT_CC_GET_DATA:
268 	case EXT_CC_HOST_IDX:
269 	case EXT_CC_READ_NVRAM:
270 	case EXT_CC_UPDATE_NVRAM:
271 	case EXT_CC_READ_OPTION_ROM:
272 	case EXT_CC_READ_OPTION_ROM_EX:
273 	case EXT_CC_UPDATE_OPTION_ROM:
274 	case EXT_CC_UPDATE_OPTION_ROM_EX:
275 	case EXT_CC_GET_VPD:
276 	case EXT_CC_SET_VPD:
277 	case EXT_CC_LOOPBACK:
278 	case EXT_CC_GET_FCACHE:
279 	case EXT_CC_GET_FCACHE_EX:
280 	case EXT_CC_HOST_DRVNAME:
281 	case EXT_CC_GET_SFP_DATA:
282 	case EXT_CC_PORT_PARAM:
283 	case EXT_CC_GET_PCI_DATA:
284 	case EXT_CC_GET_FWEXTTRACE:
285 	case EXT_CC_GET_FWFCETRACE:
286 	case EXT_CC_GET_VP_CNT_ID:
287 	case EXT_CC_VPORT_CMD:
288 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
289 		break;
290 	default:
291 		/* function not supported. */
292 		EL(ha, "function=%d not supported\n", cmd);
293 		rval = ENOPROTOOPT;
294 	}
295 
296 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
297 
298 	return (rval);
299 }
300 
301 /*
302  * ql_sdm_ioctl
303  *	Provides ioctl functions for SAN/Device Management functions
304  *	AKA External Ioctl functions.
305  *
306  * Input:
307  *	ha:		adapter state pointer.
308  *	ioctl_code:	ioctl function to perform
309  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
310  *	mode:		flags
311  *
312  * Returns:
313  *	0:	success
314  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
315  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
316  *		copyout of EXT_IOCTL status info failed.
317  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
318  *	EBUSY:	Device busy
319  *
320  * Context:
321  *	Kernel context.
322  */
323 static int
324 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
325 {
326 	EXT_IOCTL		*cmd;
327 	int			rval;
328 	ql_adapter_state_t	*vha;
329 
330 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
331 
332 	/* Copy argument structure (EXT_IOCTL) from application land. */
333 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
334 	    ql_validate_signature)) != 0) {
335 		/*
336 		 * a non-zero value at this time means a problem getting
337 		 * the requested information from application land, just
338 		 * return the error code and hope for the best.
339 		 */
340 		EL(ha, "failed, sdm_setup\n");
341 		return (rval);
342 	}
343 
344 	/*
345 	 * Map the physical ha ptr (which the ioctl is called with)
346 	 * to the virtual ha that the caller is addressing.
347 	 */
348 	if (ha->flags & VP_ENABLED) {
349 		/*
350 		 * Special case: HbaSelect == 0 is physical ha
351 		 */
352 		if (cmd->HbaSelect != 0) {
353 			vha = ha->vp_next;
354 			while (vha != NULL) {
355 				if (vha->vp_index == cmd->HbaSelect) {
356 					ha = vha;
357 					break;
358 				}
359 				vha = vha->vp_next;
360 			}
361 
362 			/*
363 			 * If we can't find the specified vp index then
364 			 * we probably have an error (vp indexes shifting
365 			 * under our feet?).
366 			 */
367 			if (vha == NULL) {
368 				EL(ha, "Invalid HbaSelect vp index: %xh\n",
369 				    cmd->HbaSelect);
370 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
371 				cmd->ResponseLen = 0;
372 				return (EFAULT);
373 			}
374 		}
375 	}
376 
377 	/*
378 	 * If driver is suspended or stalled, rtn BUSY so caller
379 	 * can try again at some later time
380 	 */
381 	if (ha->flags & ADAPTER_SUSPENDED ||
382 	    ha->task_daemon_flags & DRIVER_STALL) {
383 		EL(ha, "driver %s\n",
384 		    ha->flags & ADAPTER_SUSPENDED ? "suspended" : "stalled");
385 		cmd->Status = EXT_STATUS_BUSY;
386 		cmd->ResponseLen = 0;
387 		rval = EBUSY;
388 
389 		/* Return results to caller */
390 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
391 			EL(ha, "failed, sdm_return\n");
392 			rval = EFAULT;
393 		}
394 		return (rval);
395 	}
396 
397 	switch (ioctl_code) {
398 	case EXT_CC_QUERY_OS:
399 		ql_query(ha, cmd, mode);
400 		break;
401 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
402 		ql_fcct(ha, cmd, mode);
403 		break;
404 	case EXT_CC_REG_AEN_OS:
405 		ql_aen_reg(ha, cmd, mode);
406 		break;
407 	case EXT_CC_GET_AEN_OS:
408 		ql_aen_get(ha, cmd, mode);
409 		break;
410 	case EXT_CC_GET_DATA_OS:
411 		ql_get_host_data(ha, cmd, mode);
412 		break;
413 	case EXT_CC_SET_DATA_OS:
414 		ql_set_host_data(ha, cmd, mode);
415 		break;
416 	case EXT_CC_SEND_ELS_RNID_OS:
417 		ql_send_els_rnid(ha, cmd, mode);
418 		break;
419 	case EXT_CC_SCSI_PASSTHRU_OS:
420 		ql_scsi_passthru(ha, cmd, mode);
421 		break;
422 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
423 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
424 		break;
425 	case EXT_CC_HOST_IDX_OS:
426 		ql_host_idx(ha, cmd, mode);
427 		break;
428 	case EXT_CC_HOST_DRVNAME_OS:
429 		ql_host_drvname(ha, cmd, mode);
430 		break;
431 	case EXT_CC_READ_NVRAM_OS:
432 		ql_read_nvram(ha, cmd, mode);
433 		break;
434 	case EXT_CC_UPDATE_NVRAM_OS:
435 		ql_write_nvram(ha, cmd, mode);
436 		break;
437 	case EXT_CC_READ_OPTION_ROM_OS:
438 	case EXT_CC_READ_OPTION_ROM_EX_OS:
439 		ql_read_flash(ha, cmd, mode);
440 		break;
441 	case EXT_CC_UPDATE_OPTION_ROM_OS:
442 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
443 		ql_write_flash(ha, cmd, mode);
444 		break;
445 	case EXT_CC_LOOPBACK_OS:
446 		ql_diagnostic_loopback(ha, cmd, mode);
447 		break;
448 	case EXT_CC_GET_VPD_OS:
449 		ql_read_vpd(ha, cmd, mode);
450 		break;
451 	case EXT_CC_SET_VPD_OS:
452 		ql_write_vpd(ha, cmd, mode);
453 		break;
454 	case EXT_CC_GET_FCACHE_OS:
455 		ql_get_fcache(ha, cmd, mode);
456 		break;
457 	case EXT_CC_GET_FCACHE_EX_OS:
458 		ql_get_fcache_ex(ha, cmd, mode);
459 		break;
460 	case EXT_CC_GET_SFP_DATA_OS:
461 		ql_get_sfp(ha, cmd, mode);
462 		break;
463 	case EXT_CC_PORT_PARAM_OS:
464 		ql_port_param(ha, cmd, mode);
465 		break;
466 	case EXT_CC_GET_PCI_DATA_OS:
467 		ql_get_pci_data(ha, cmd, mode);
468 		break;
469 	case EXT_CC_GET_FWEXTTRACE_OS:
470 		ql_get_fwexttrace(ha, cmd, mode);
471 		break;
472 	case EXT_CC_GET_FWFCETRACE_OS:
473 		ql_get_fwfcetrace(ha, cmd, mode);
474 		break;
475 	case EXT_CC_MENLO_RESET:
476 		ql_menlo_reset(ha, cmd, mode);
477 		break;
478 	case EXT_CC_MENLO_GET_FW_VERSION:
479 		ql_menlo_get_fw_version(ha, cmd, mode);
480 		break;
481 	case EXT_CC_MENLO_UPDATE_FW:
482 		ql_menlo_update_fw(ha, cmd, mode);
483 		break;
484 	case EXT_CC_MENLO_MANAGE_INFO:
485 		ql_menlo_manage_info(ha, cmd, mode);
486 		break;
487 	case EXT_CC_GET_VP_CNT_ID_OS:
488 		ql_get_vp_cnt_id(ha, cmd, mode);
489 		break;
490 	case EXT_CC_VPORT_CMD_OS:
491 		ql_vp_ioctl(ha, cmd, mode);
492 		break;
493 	default:
494 		/* function not supported. */
495 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
496 
497 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
498 		cmd->ResponseLen = 0;
499 		break;
500 	}
501 
502 	/* Return results to caller */
503 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
504 		EL(ha, "failed, sdm_return\n");
505 		return (EFAULT);
506 	}
507 
508 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
509 
510 	return (0);
511 }
512 
513 /*
514  * ql_sdm_setup
515  *	Make a local copy of the EXT_IOCTL struct and validate it.
516  *
517  * Input:
518  *	ha:		adapter state pointer.
519  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
520  *	arg:		Address of application EXT_IOCTL cmd data
521  *	mode:		flags
522  *	val_sig:	Pointer to a function to validate the ioctl signature.
523  *
524  * Returns:
525  *	0:		success
526  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
527  *	EINVAL:		Invalid version, signature.
528  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
529  *
530  * Context:
531  *	Kernel context.
532  */
533 static int
534 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
535     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
536 {
537 	int		rval;
538 	EXT_IOCTL	*cmd;
539 
540 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
541 
542 	/* Allocate local memory for EXT_IOCTL. */
543 	*cmd_struct = NULL;
544 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
545 	if (cmd == NULL) {
546 		EL(ha, "failed, kmem_zalloc\n");
547 		return (ENOMEM);
548 	}
549 	/* Get argument structure. */
550 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
551 	if (rval != 0) {
552 		EL(ha, "failed, ddi_copyin\n");
553 		rval = EFAULT;
554 	} else {
555 		/*
556 		 * Check signature and the version.
557 		 * If either are not valid then neither is the
558 		 * structure so don't attempt to return any error status
559 		 * because we can't trust what caller's arg points to.
560 		 * Just return the errno.
561 		 */
562 		if (val_sig(cmd) == 0) {
563 			EL(ha, "failed, signature\n");
564 			rval = EINVAL;
565 		} else if (cmd->Version > EXT_VERSION) {
566 			EL(ha, "failed, version\n");
567 			rval = EINVAL;
568 		}
569 	}
570 
571 	if (rval == 0) {
572 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
573 		*cmd_struct = cmd;
574 		cmd->Status = EXT_STATUS_OK;
575 		cmd->DetailStatus = 0;
576 	} else {
577 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
578 	}
579 
580 	return (rval);
581 }
582 
583 /*
584  * ql_validate_signature
585  *	Validate the signature string for an external ioctl call.
586  *
587  * Input:
588  *	sg:	Pointer to EXT_IOCTL signature to validate.
589  *
590  * Returns:
591  *	B_TRUE:		Signature is valid.
592  *	B_FALSE:	Signature is NOT valid.
593  *
594  * Context:
595  *	Kernel context.
596  */
597 static boolean_t
598 ql_validate_signature(EXT_IOCTL *cmd_struct)
599 {
600 	/*
601 	 * Check signature.
602 	 *
603 	 * If signature is not valid then neither is the rest of
604 	 * the structure (e.g., can't trust it), so don't attempt
605 	 * to return any error status other than the errno.
606 	 */
607 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
608 		QL_PRINT_2(CE_CONT, "failed,\n");
609 		return (B_FALSE);
610 	}
611 
612 	return (B_TRUE);
613 }
614 
615 /*
616  * ql_sdm_return
617  *	Copies return data/status to application land for
618  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
619  *
620  * Input:
621  *	ha:		adapter state pointer.
622  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
623  *	ioctl_code:	ioctl function to perform
624  *	arg:		EXT_IOCTL cmd data in application land.
625  *	mode:		flags
626  *
627  * Returns:
628  *	0:	success
629  *	EFAULT:	Copy out error.
630  *
631  * Context:
632  *	Kernel context.
633  */
634 /* ARGSUSED */
635 static int
636 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
637 {
638 	int	rval = 0;
639 
640 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
641 
642 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
643 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
644 	    mode);
645 
646 	rval |= ddi_copyout((void *)&cmd->Status,
647 	    (void *)&(((EXT_IOCTL*)arg)->Status),
648 	    sizeof (cmd->Status), mode);
649 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
650 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
651 	    sizeof (cmd->DetailStatus), mode);
652 
653 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
654 
655 	if (rval != 0) {
656 		/* Some copyout operation failed */
657 		EL(ha, "failed, ddi_copyout\n");
658 		return (EFAULT);
659 	}
660 
661 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
662 
663 	return (0);
664 }
665 
666 /*
667  * ql_query
668  *	Performs all EXT_CC_QUERY functions.
669  *
670  * Input:
671  *	ha:	adapter state pointer.
672  *	cmd:	Local EXT_IOCTL cmd struct pointer.
673  *	mode:	flags.
674  *
675  * Returns:
676  *	None, request status indicated in cmd->Status.
677  *
678  * Context:
679  *	Kernel context.
680  */
681 static void
682 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
683 {
684 	QL_PRINT_9(CE_CONT, "(%d): entered, cmd=%d\n", ha->instance,
685 	    cmd->SubCode);
686 
687 	/* case off on command subcode */
688 	switch (cmd->SubCode) {
689 	case EXT_SC_QUERY_HBA_NODE:
690 		ql_qry_hba_node(ha, cmd, mode);
691 		break;
692 	case EXT_SC_QUERY_HBA_PORT:
693 		ql_qry_hba_port(ha, cmd, mode);
694 		break;
695 	case EXT_SC_QUERY_DISC_PORT:
696 		ql_qry_disc_port(ha, cmd, mode);
697 		break;
698 	case EXT_SC_QUERY_DISC_TGT:
699 		ql_qry_disc_tgt(ha, cmd, mode);
700 		break;
701 	case EXT_SC_QUERY_DRIVER:
702 		ql_qry_driver(ha, cmd, mode);
703 		break;
704 	case EXT_SC_QUERY_FW:
705 		ql_qry_fw(ha, cmd, mode);
706 		break;
707 	case EXT_SC_QUERY_CHIP:
708 		ql_qry_chip(ha, cmd, mode);
709 		break;
710 	case EXT_SC_QUERY_DISC_LUN:
711 	default:
712 		/* function not supported. */
713 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
714 		EL(ha, "failed, Unsupported Subcode=%xh\n",
715 		    cmd->SubCode);
716 		break;
717 	}
718 
719 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
720 }
721 
/*
 * ql_qry_hba_node
 *	Performs EXT_SC_QUERY_HBA_NODE subfunction.  Builds an
 *	EXT_HBA_NODE describing this adapter (WWNN, manufacturer,
 *	model, serial, driver/firmware/FCode versions) and copies
 *	it out to the caller's response buffer.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_NODE	tmp_node = {0};
	uint_t		len;
	caddr_t		bufp;
	ql_mbx_data_t	mr;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Caller's buffer must hold a complete EXT_HBA_NODE. */
	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		/* Tell the caller the size actually required. */
		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
		    "Len=%xh\n", cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
	    EXT_DEF_WWN_NAME_SIZE);

	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");

	/* Model is reported as the hex PCI device id. */
	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);

	/* Serial number is derived from the low 3 bytes of the WWNN. */
	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);

	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		size_t		verlen;
		uint16_t	w;
		char		*tmpptr;

		/*
		 * On SBUS cards, append the FPGA revision ("d.d", up to
		 * 4 chars incl. NUL) after the driver version string.
		 */
		verlen = strlen((char *)(tmp_node.DriverVersion));
		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
			EL(ha, "failed, No room for fpga version string\n");
		} else {
			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
			    (uint16_t *)
			    (ha->sbus_fpga_iobase + FPGA_REVISION));

			/*
			 * Skip over the version string's NUL terminator.
			 * NOTE(review): tmpptr is the address of an array
			 * element and can never be NULL; this check looks
			 * like dead code — confirm before removing.
			 */
			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
			if (tmpptr == NULL) {
				EL(ha, "Unable to insert fpga version str\n");
			} else {
				(void) sprintf(tmpptr, "%d.%d",
				    ((w & 0xf0) >> 4), (w & 0x0f));
				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
			}
		}
	}
	(void) ql_get_fw_version(ha, &mr);

	/* Firmware version comes back in mailbox registers 1-3. */
	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
	    mr.mb[1], mr.mb[2], mr.mb[3]);

	/*
	 * On non-24xx/25xx chips, mailbox 6 carries a firmware attribute
	 * code; append the matching suffix to the version string.
	 */
	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
		switch (mr.mb[6]) {
		case FWATTRIB_EF:
			(void) strcat((char *)(tmp_node.FWVersion), " EF");
			break;
		case FWATTRIB_TP:
			(void) strcat((char *)(tmp_node.FWVersion), " TP");
			break;
		case FWATTRIB_IP:
			(void) strcat((char *)(tmp_node.FWVersion), " IP");
			break;
		case FWATTRIB_IPX:
			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
			break;
		case FWATTRIB_FL:
			(void) strcat((char *)(tmp_node.FWVersion), " FL");
			break;
		case FWATTRIB_FPX:
			/*
			 * NOTE(review): " FLX" for FWATTRIB_FPX looks like
			 * it may be a typo for " FPX" — confirm against the
			 * firmware attribute naming before changing.
			 */
			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
			break;
		default:
			break;
		}
	}

	/* FCode version. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
	    (int *)&len) == DDI_PROP_SUCCESS) {
		if (len < EXT_DEF_MAX_STR_SIZE) {
			bcopy(bufp, tmp_node.OptRomVersion, len);
		} else {
			/* Truncate and force NUL termination. */
			bcopy(bufp, tmp_node.OptRomVersion,
			    EXT_DEF_MAX_STR_SIZE - 1);
			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
			    '\0';
		}
		/* Property buffer was allocated by ddi_getlongprop(). */
		kmem_free(bufp, len);
	} else {
		/* No "version" property: report "0". */
		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
	}
	tmp_node.PortCount = 1;
	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;

	/* Copy the completed node info out to the caller's buffer. */
	if (ddi_copyout((void *)&tmp_node,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_NODE), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
	}
}
853 
854 /*
855  * ql_qry_hba_port
856  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
857  *
858  * Input:
859  *	ha:	adapter state pointer.
860  *	cmd:	EXT_IOCTL cmd struct pointer.
861  *	mode:	flags.
862  *
863  * Returns:
864  *	None, request status indicated in cmd->Status.
865  *
866  * Context:
867  *	Kernel context.
868  */
869 static void
870 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
871 {
872 	ql_link_t	*link;
873 	ql_tgt_t	*tq;
874 	ql_mbx_data_t	mr;
875 	EXT_HBA_PORT	tmp_port = {0};
876 	int		rval;
877 	uint16_t	port_cnt, tgt_cnt, index;
878 
879 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
880 
881 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
882 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
883 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
884 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
885 		    cmd->ResponseLen);
886 		cmd->ResponseLen = 0;
887 		return;
888 	}
889 
890 	/* fill in the values */
891 
892 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
893 	    EXT_DEF_WWN_NAME_SIZE);
894 	tmp_port.Id[0] = 0;
895 	tmp_port.Id[1] = ha->d_id.b.domain;
896 	tmp_port.Id[2] = ha->d_id.b.area;
897 	tmp_port.Id[3] = ha->d_id.b.al_pa;
898 
899 	/* For now we are initiator only driver */
900 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
901 
902 	if (ha->task_daemon_flags & LOOP_DOWN) {
903 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
904 	} else if (DRIVER_SUSPENDED(ha)) {
905 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
906 	} else {
907 		tmp_port.State = EXT_DEF_HBA_OK;
908 	}
909 
910 	if (ha->flags & POINT_TO_POINT) {
911 		tmp_port.Mode = EXT_DEF_P2P_MODE;
912 	} else {
913 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
914 	}
915 	/*
916 	 * fill in the portspeed values.
917 	 *
918 	 * default to not yet negotiated state
919 	 */
920 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
921 
922 	if (tmp_port.State == EXT_DEF_HBA_OK) {
923 		if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
924 			mr.mb[1] = 0;
925 			mr.mb[2] = 0;
926 			rval = ql_data_rate(ha, &mr);
927 			if (rval != QL_SUCCESS) {
928 				EL(ha, "failed, data_rate=%xh\n", rval);
929 			} else {
930 				switch (mr.mb[1]) {
931 				case 0:
932 					tmp_port.PortSpeed =
933 					    EXT_DEF_PORTSPEED_1GBIT;
934 					break;
935 				case 1:
936 					tmp_port.PortSpeed =
937 					    EXT_DEF_PORTSPEED_2GBIT;
938 					break;
939 				case 3:
940 					tmp_port.PortSpeed =
941 					    EXT_DEF_PORTSPEED_4GBIT;
942 					break;
943 				case 4:
944 					tmp_port.PortSpeed =
945 					    EXT_DEF_PORTSPEED_8GBIT;
946 					break;
947 				default:
948 					tmp_port.PortSpeed =
949 					    EXT_DEF_PORTSPEED_UNKNOWN;
950 					EL(ha, "failed, data rate=%xh\n",
951 					    mr.mb[1]);
952 					break;
953 				}
954 			}
955 		} else {
956 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
957 		}
958 	}
959 
960 	/* Report all supported port speeds */
961 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
962 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
963 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
964 		    EXT_DEF_PORTSPEED_1GBIT);
965 		/*
966 		 * Correct supported speeds based on type of
967 		 * sfp that is present
968 		 */
969 		switch (ha->sfp_stat) {
970 		case 1:
971 			/* no sfp detected */
972 			break;
973 		case 2:
974 		case 4:
975 			/* 4GB sfp */
976 			tmp_port.PortSupportedSpeed &=
977 			    ~EXT_DEF_PORTSPEED_8GBIT;
978 			break;
979 		case 3:
980 		case 5:
981 			/* 8GB sfp */
982 			tmp_port.PortSupportedSpeed &=
983 			    ~EXT_DEF_PORTSPEED_1GBIT;
984 			break;
985 		default:
986 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
987 			break;
988 
989 		}
990 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
991 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
992 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
993 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
994 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
995 		    EXT_DEF_PORTSPEED_1GBIT);
996 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
997 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
998 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
999 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1000 	} else {
1001 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1002 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1003 	}
1004 	tmp_port.sfp_status = LSB(ha->sfp_stat);
1005 	port_cnt = 0;
1006 	tgt_cnt = 0;
1007 
1008 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1009 		for (link = ha->dev[index].first; link != NULL;
1010 		    link = link->next) {
1011 			tq = link->base_address;
1012 
1013 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1014 				continue;
1015 			}
1016 
1017 			port_cnt++;
1018 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1019 				tgt_cnt++;
1020 			}
1021 		}
1022 	}
1023 
1024 	tmp_port.DiscPortCount = port_cnt;
1025 	tmp_port.DiscTargetCount = tgt_cnt;
1026 
1027 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1028 
1029 	rval = ddi_copyout((void *)&tmp_port,
1030 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1031 	    sizeof (EXT_HBA_PORT), mode);
1032 	if (rval != 0) {
1033 		cmd->Status = EXT_STATUS_COPY_ERR;
1034 		cmd->ResponseLen = 0;
1035 		EL(ha, "failed, ddi_copyout\n");
1036 	} else {
1037 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1038 		QL_PRINT_9(CE_CONT, "(%d): exiting, ports=%d, targets=%d\n",
1039 		    ha->instance, port_cnt, tgt_cnt);
1040 	}
1041 }
1042 
1043 /*
1044  * ql_qry_disc_port
1045  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1046  *
1047  * Input:
1048  *	ha:	adapter state pointer.
1049  *	cmd:	EXT_IOCTL cmd struct pointer.
1050  *	mode:	flags.
1051  *
1052  *	cmd->Instance = Port instance in fcport chain.
1053  *
1054  * Returns:
1055  *	None, request status indicated in cmd->Status.
1056  *
1057  * Context:
1058  *	Kernel context.
1059  */
1060 static void
1061 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1062 {
1063 	EXT_DISC_PORT	tmp_port = {0};
1064 	ql_link_t	*link;
1065 	ql_tgt_t	*tq;
1066 	uint16_t	index;
1067 	uint16_t	inst = 0;
1068 
1069 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1070 
1071 	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1072 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1073 		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1074 		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1075 		    cmd->ResponseLen);
1076 		cmd->ResponseLen = 0;
1077 		return;
1078 	}
1079 
1080 	for (link = NULL, index = 0;
1081 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1082 		for (link = ha->dev[index].first; link != NULL;
1083 		    link = link->next) {
1084 			tq = link->base_address;
1085 
1086 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1087 				continue;
1088 			}
1089 			if (inst != cmd->Instance) {
1090 				inst++;
1091 				continue;
1092 			}
1093 
1094 			/* fill in the values */
1095 			bcopy(tq->node_name, tmp_port.WWNN,
1096 			    EXT_DEF_WWN_NAME_SIZE);
1097 			bcopy(tq->port_name, tmp_port.WWPN,
1098 			    EXT_DEF_WWN_NAME_SIZE);
1099 
1100 			break;
1101 		}
1102 	}
1103 
1104 	if (link == NULL) {
1105 		/* no matching device */
1106 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1107 		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1108 		cmd->ResponseLen = 0;
1109 		return;
1110 	}
1111 
1112 	tmp_port.Id[0] = 0;
1113 	tmp_port.Id[1] = tq->d_id.b.domain;
1114 	tmp_port.Id[2] = tq->d_id.b.area;
1115 	tmp_port.Id[3] = tq->d_id.b.al_pa;
1116 
1117 	tmp_port.Type = 0;
1118 	if (tq->flags & TQF_INITIATOR_DEVICE) {
1119 		tmp_port.Type = (uint16_t)(tmp_port.Type |
1120 		    EXT_DEF_INITIATOR_DEV);
1121 	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1122 		(void) ql_inq_scan(ha, tq, 1);
1123 	} else if (tq->flags & TQF_TAPE_DEVICE) {
1124 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1125 	}
1126 
1127 	if (tq->flags & TQF_FABRIC_DEVICE) {
1128 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1129 	} else {
1130 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1131 	}
1132 
1133 	tmp_port.Status = 0;
1134 	tmp_port.Bus = 0;  /* Hard-coded for Solaris */
1135 
1136 	bcopy(tq->port_name, &tmp_port.TargetId, 8);
1137 
1138 	if (ddi_copyout((void *)&tmp_port,
1139 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1140 	    sizeof (EXT_DISC_PORT), mode) != 0) {
1141 		cmd->Status = EXT_STATUS_COPY_ERR;
1142 		cmd->ResponseLen = 0;
1143 		EL(ha, "failed, ddi_copyout\n");
1144 	} else {
1145 		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1146 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1147 	}
1148 }
1149 
1150 /*
1151  * ql_qry_disc_tgt
1152  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1153  *
1154  * Input:
1155  *	ha:		adapter state pointer.
1156  *	cmd:		EXT_IOCTL cmd struct pointer.
1157  *	mode:		flags.
1158  *
1159  *	cmd->Instance = Port instance in fcport chain.
1160  *
1161  * Returns:
1162  *	None, request status indicated in cmd->Status.
1163  *
1164  * Context:
1165  *	Kernel context.
1166  */
1167 static void
1168 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1169 {
1170 	EXT_DISC_TARGET	tmp_tgt = {0};
1171 	ql_link_t	*link;
1172 	ql_tgt_t	*tq;
1173 	uint16_t	index;
1174 	uint16_t	inst = 0;
1175 
1176 	QL_PRINT_9(CE_CONT, "(%d): entered, target=%d\n", ha->instance,
1177 	    cmd->Instance);
1178 
1179 	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1180 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1181 		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1182 		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1183 		    cmd->ResponseLen);
1184 		cmd->ResponseLen = 0;
1185 		return;
1186 	}
1187 
1188 	/* Scan port list for requested target and fill in the values */
1189 	for (link = NULL, index = 0;
1190 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1191 		for (link = ha->dev[index].first; link != NULL;
1192 		    link = link->next) {
1193 			tq = link->base_address;
1194 
1195 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1196 			    tq->flags & TQF_INITIATOR_DEVICE) {
1197 				continue;
1198 			}
1199 			if (inst != cmd->Instance) {
1200 				inst++;
1201 				continue;
1202 			}
1203 
1204 			/* fill in the values */
1205 			bcopy(tq->node_name, tmp_tgt.WWNN,
1206 			    EXT_DEF_WWN_NAME_SIZE);
1207 			bcopy(tq->port_name, tmp_tgt.WWPN,
1208 			    EXT_DEF_WWN_NAME_SIZE);
1209 
1210 			break;
1211 		}
1212 	}
1213 
1214 	if (link == NULL) {
1215 		/* no matching device */
1216 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1217 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
1218 		EL(ha, "failed, not found target=%d\n", cmd->Instance);
1219 		cmd->ResponseLen = 0;
1220 		return;
1221 	}
1222 	tmp_tgt.Id[0] = 0;
1223 	tmp_tgt.Id[1] = tq->d_id.b.domain;
1224 	tmp_tgt.Id[2] = tq->d_id.b.area;
1225 	tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1226 
1227 	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1228 
1229 	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1230 		(void) ql_inq_scan(ha, tq, 1);
1231 	}
1232 
1233 	tmp_tgt.Type = 0;
1234 	if (tq->flags & TQF_TAPE_DEVICE) {
1235 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1236 	}
1237 
1238 	if (tq->flags & TQF_FABRIC_DEVICE) {
1239 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1240 	} else {
1241 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1242 	}
1243 
1244 	tmp_tgt.Status = 0;
1245 
1246 	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */
1247 
1248 	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1249 
1250 	if (ddi_copyout((void *)&tmp_tgt,
1251 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1252 	    sizeof (EXT_DISC_TARGET), mode) != 0) {
1253 		cmd->Status = EXT_STATUS_COPY_ERR;
1254 		cmd->ResponseLen = 0;
1255 		EL(ha, "failed, ddi_copyout\n");
1256 	} else {
1257 		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1258 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1259 	}
1260 }
1261 
1262 /*
1263  * ql_qry_fw
1264  *	Performs EXT_SC_QUERY_FW subfunction.
1265  *
1266  * Input:
1267  *	ha:	adapter state pointer.
1268  *	cmd:	EXT_IOCTL cmd struct pointer.
1269  *	mode:	flags.
1270  *
1271  * Returns:
1272  *	None, request status indicated in cmd->Status.
1273  *
1274  * Context:
1275  *	Kernel context.
1276  */
1277 static void
1278 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1279 {
1280 	ql_mbx_data_t	mr;
1281 	EXT_FW		fw_info = {0};
1282 
1283 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1284 
1285 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1286 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1287 		cmd->DetailStatus = sizeof (EXT_FW);
1288 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1289 		    cmd->ResponseLen);
1290 		cmd->ResponseLen = 0;
1291 		return;
1292 	}
1293 
1294 	(void) ql_get_fw_version(ha, &mr);
1295 
1296 	(void) sprintf((char *)(fw_info.Version), "%d.%d.%d", mr.mb[1],
1297 	    mr.mb[2], mr.mb[2]);
1298 
1299 	fw_info.Attrib = mr.mb[6];
1300 
1301 	if (ddi_copyout((void *)&fw_info, (void *)(uintptr_t)(cmd->ResponseAdr),
1302 	    sizeof (EXT_FW), mode) != 0) {
1303 		cmd->Status = EXT_STATUS_COPY_ERR;
1304 		cmd->ResponseLen = 0;
1305 		EL(ha, "failed, ddi_copyout\n");
1306 		return;
1307 	} else {
1308 		cmd->ResponseLen = sizeof (EXT_FW);
1309 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1310 	}
1311 }
1312 
1313 /*
1314  * ql_qry_chip
1315  *	Performs EXT_SC_QUERY_CHIP subfunction.
1316  *
1317  * Input:
1318  *	ha:	adapter state pointer.
1319  *	cmd:	EXT_IOCTL cmd struct pointer.
1320  *	mode:	flags.
1321  *
1322  * Returns:
1323  *	None, request status indicated in cmd->Status.
1324  *
1325  * Context:
1326  *	Kernel context.
1327  */
1328 static void
1329 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1330 {
1331 	EXT_CHIP	chip = {0};
1332 
1333 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1334 
1335 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1336 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1337 		cmd->DetailStatus = sizeof (EXT_CHIP);
1338 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1339 		    cmd->ResponseLen);
1340 		cmd->ResponseLen = 0;
1341 		return;
1342 	}
1343 
1344 	chip.VendorId = ha->ven_id;
1345 	chip.DeviceId = ha->device_id;
1346 	chip.SubVendorId = ha->subven_id;
1347 	chip.SubSystemId = ha->subsys_id;
1348 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1349 	chip.IoAddrLen = 0x100;
1350 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1351 	chip.MemAddrLen = 0x100;
1352 	chip.ChipRevID = ha->rev_id;
1353 
1354 	if (ddi_copyout((void *)&chip, (void *)(uintptr_t)(cmd->ResponseAdr),
1355 	    sizeof (EXT_CHIP), mode) != 0) {
1356 		cmd->Status = EXT_STATUS_COPY_ERR;
1357 		cmd->ResponseLen = 0;
1358 		EL(ha, "failed, ddi_copyout\n");
1359 	} else {
1360 		cmd->ResponseLen = sizeof (EXT_CHIP);
1361 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1362 	}
1363 }
1364 
1365 /*
1366  * ql_qry_driver
1367  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1368  *
1369  * Input:
1370  *	ha:	adapter state pointer.
1371  *	cmd:	EXT_IOCTL cmd struct pointer.
1372  *	mode:	flags.
1373  *
1374  * Returns:
1375  *	None, request status indicated in cmd->Status.
1376  *
1377  * Context:
1378  *	Kernel context.
1379  */
1380 static void
1381 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1382 {
1383 	EXT_DRIVER	qd = {0};
1384 
1385 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1386 
1387 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1388 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1389 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1390 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1391 		    cmd->ResponseLen);
1392 		cmd->ResponseLen = 0;
1393 		return;
1394 	}
1395 
1396 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1397 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1398 	qd.TargetsPerBus = (uint16_t)
1399 	    (CFG_IST(ha, (CFG_CTRL_2425|CFG_EXT_FW_INTERFACE)) ?
1400 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1401 	qd.LunsPerTarget = 2030;
1402 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1403 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1404 
1405 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1406 	    sizeof (EXT_DRIVER), mode) != 0) {
1407 		cmd->Status = EXT_STATUS_COPY_ERR;
1408 		cmd->ResponseLen = 0;
1409 		EL(ha, "failed, ddi_copyout\n");
1410 	} else {
1411 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1412 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1413 	}
1414 }
1415 
1416 /*
1417  * ql_fcct
1418  *	IOCTL management server FC-CT passthrough.
1419  *
1420  * Input:
1421  *	ha:	adapter state pointer.
1422  *	cmd:	User space CT arguments pointer.
1423  *	mode:	flags.
1424  *
1425  * Returns:
1426  *	None, request status indicated in cmd->Status.
1427  *
1428  * Context:
1429  *	Kernel context.
1430  */
1431 static void
1432 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1433 {
1434 	ql_mbx_iocb_t		*pkt;
1435 	ql_mbx_data_t		mr;
1436 	dma_mem_t		*dma_mem;
1437 	caddr_t			pld;
1438 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1439 	int			rval;
1440 	ql_ct_iu_preamble_t	*ct;
1441 	ql_xioctl_t		*xp = ha->xioctl;
1442 	ql_tgt_t		tq;
1443 	uint16_t		comp_status, loop_id;
1444 
1445 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1446 
1447 	/* Get CT argument structure. */
1448 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1449 		EL(ha, "failed, No switch\n");
1450 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1451 		cmd->ResponseLen = 0;
1452 		return;
1453 	}
1454 
1455 	if (DRIVER_SUSPENDED(ha)) {
1456 		EL(ha, "failed, LOOP_NOT_READY\n");
1457 		cmd->Status = EXT_STATUS_BUSY;
1458 		cmd->ResponseLen = 0;
1459 		return;
1460 	}
1461 
1462 	/* Login management server device. */
1463 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1464 		tq.d_id.b.al_pa = 0xfa;
1465 		tq.d_id.b.area = 0xff;
1466 		tq.d_id.b.domain = 0xff;
1467 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
1468 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1469 		    MANAGEMENT_SERVER_LOOP_ID);
1470 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1471 		if (rval != QL_SUCCESS) {
1472 			EL(ha, "failed, server login\n");
1473 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1474 			cmd->ResponseLen = 0;
1475 			return;
1476 		} else {
1477 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1478 		}
1479 	}
1480 
1481 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1482 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1483 
1484 	/* Allocate a DMA Memory Descriptor */
1485 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1486 	if (dma_mem == NULL) {
1487 		EL(ha, "failed, kmem_zalloc\n");
1488 		cmd->Status = EXT_STATUS_NO_MEMORY;
1489 		cmd->ResponseLen = 0;
1490 		return;
1491 	}
1492 	/* Determine maximum buffer size. */
1493 	if (cmd->RequestLen < cmd->ResponseLen) {
1494 		pld_byte_cnt = cmd->ResponseLen;
1495 	} else {
1496 		pld_byte_cnt = cmd->RequestLen;
1497 	}
1498 
1499 	/* Allocate command block. */
1500 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1501 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1502 	if (pkt == NULL) {
1503 		EL(ha, "failed, kmem_zalloc\n");
1504 		cmd->Status = EXT_STATUS_NO_MEMORY;
1505 		cmd->ResponseLen = 0;
1506 		return;
1507 	}
1508 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1509 
1510 	/* Get command payload data. */
1511 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1512 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1513 		EL(ha, "failed, get_buffer_data\n");
1514 		kmem_free(pkt, pkt_size);
1515 		cmd->Status = EXT_STATUS_COPY_ERR;
1516 		cmd->ResponseLen = 0;
1517 		return;
1518 	}
1519 
1520 	/* Get DMA memory for the IOCB */
1521 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1522 	    MEM_RING_ALIGN) != QL_SUCCESS) {
1523 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1524 		    "alloc failed", QL_NAME, ha->instance);
1525 		kmem_free(pkt, pkt_size);
1526 		kmem_free(dma_mem, sizeof (dma_mem_t));
1527 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1528 		cmd->ResponseLen = 0;
1529 		return;
1530 	}
1531 
1532 	/* Copy out going payload data to IOCB DMA buffer. */
1533 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1534 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1535 
1536 	/* Sync IOCB DMA buffer. */
1537 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1538 	    DDI_DMA_SYNC_FORDEV);
1539 
1540 	/*
1541 	 * Setup IOCB
1542 	 */
1543 	ct = (ql_ct_iu_preamble_t *)pld;
1544 	if (CFG_IST(ha, CFG_CTRL_2425)) {
1545 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1546 		pkt->ms24.entry_count = 1;
1547 
1548 		/* Set loop ID */
1549 		pkt->ms24.n_port_hdl = (uint16_t)
1550 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1551 		    LE_16(SNS_24XX_HDL) :
1552 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1553 
1554 		/* Set ISP command timeout. */
1555 		pkt->ms24.timeout = LE_16(120);
1556 
1557 		/* Set cmd/response data segment counts. */
1558 		pkt->ms24.cmd_dseg_count = LE_16(1);
1559 		pkt->ms24.resp_dseg_count = LE_16(1);
1560 
1561 		/* Load ct cmd byte count. */
1562 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1563 
1564 		/* Load ct rsp byte count. */
1565 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1566 
1567 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1568 
1569 		/* Load MS command entry data segments. */
1570 		*long_ptr++ = (uint32_t)
1571 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1572 		*long_ptr++ = (uint32_t)
1573 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1574 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1575 
1576 		/* Load MS response entry data segments. */
1577 		*long_ptr++ = (uint32_t)
1578 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1579 		*long_ptr++ = (uint32_t)
1580 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1581 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1582 
1583 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1584 		    sizeof (ql_mbx_iocb_t));
1585 
1586 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1587 		if (comp_status == CS_DATA_UNDERRUN) {
1588 			if ((BE_16(ct->max_residual_size)) == 0) {
1589 				comp_status = CS_COMPLETE;
1590 			}
1591 		}
1592 
1593 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1594 		    0) {
1595 			EL(ha, "failed, I/O timeout or "
1596 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1597 			    pkt->sts24.entry_status,
1598 			    pkt->sts24.scsi_status_l, rval);
1599 			kmem_free(pkt, pkt_size);
1600 			ql_free_dma_resource(ha, dma_mem);
1601 			kmem_free(dma_mem, sizeof (dma_mem_t));
1602 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1603 			cmd->ResponseLen = 0;
1604 			return;
1605 		}
1606 	} else {
1607 		pkt->ms.entry_type = MS_TYPE;
1608 		pkt->ms.entry_count = 1;
1609 
1610 		/* Set loop ID */
1611 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1612 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1613 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1614 			pkt->ms.loop_id_l = LSB(loop_id);
1615 			pkt->ms.loop_id_h = MSB(loop_id);
1616 		} else {
1617 			pkt->ms.loop_id_h = LSB(loop_id);
1618 		}
1619 
1620 		/* Set ISP command timeout. */
1621 		pkt->ms.timeout = LE_16(120);
1622 
1623 		/* Set data segment counts. */
1624 		pkt->ms.cmd_dseg_count_l = 1;
1625 		pkt->ms.total_dseg_count = LE_16(2);
1626 
1627 		/* Response total byte count. */
1628 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1629 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1630 
1631 		/* Command total byte count. */
1632 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1633 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1634 
1635 		/* Load command/response data segments. */
1636 		pkt->ms.dseg_0_address[0] = (uint32_t)
1637 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1638 		pkt->ms.dseg_0_address[1] = (uint32_t)
1639 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1640 		pkt->ms.dseg_1_address[0] = (uint32_t)
1641 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1642 		pkt->ms.dseg_1_address[1] = (uint32_t)
1643 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1644 
1645 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1646 		    sizeof (ql_mbx_iocb_t));
1647 
1648 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1649 		if (comp_status == CS_DATA_UNDERRUN) {
1650 			if ((BE_16(ct->max_residual_size)) == 0) {
1651 				comp_status = CS_COMPLETE;
1652 			}
1653 		}
1654 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1655 			EL(ha, "failed, I/O timeout or "
1656 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1657 			kmem_free(pkt, pkt_size);
1658 			ql_free_dma_resource(ha, dma_mem);
1659 			kmem_free(dma_mem, sizeof (dma_mem_t));
1660 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1661 			cmd->ResponseLen = 0;
1662 			return;
1663 		}
1664 	}
1665 
1666 	/* Sync in coming DMA buffer. */
1667 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1668 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1669 	/* Copy in coming DMA data. */
1670 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1671 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1672 	    DDI_DEV_AUTOINCR);
1673 
1674 	/* Copy response payload from DMA buffer to application. */
1675 	if (cmd->ResponseLen != 0) {
1676 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1677 		    cmd->ResponseLen);
1678 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1679 
1680 		/* Send response payload. */
1681 		if (ql_send_buffer_data(pld,
1682 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1683 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1684 			EL(ha, "failed, send_buffer_data\n");
1685 			cmd->Status = EXT_STATUS_COPY_ERR;
1686 			cmd->ResponseLen = 0;
1687 		}
1688 	}
1689 
1690 	kmem_free(pkt, pkt_size);
1691 	ql_free_dma_resource(ha, dma_mem);
1692 	kmem_free(dma_mem, sizeof (dma_mem_t));
1693 
1694 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1695 }
1696 
1697 /*
1698  * ql_aen_reg
1699  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1700  *
1701  * Input:
1702  *	ha:	adapter state pointer.
1703  *	cmd:	EXT_IOCTL cmd struct pointer.
1704  *	mode:	flags.
1705  *
1706  * Returns:
1707  *	None, request status indicated in cmd->Status.
1708  *
1709  * Context:
1710  *	Kernel context.
1711  */
1712 static void
1713 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1714 {
1715 	EXT_REG_AEN	reg_struct;
1716 	int		rval = 0;
1717 	ql_xioctl_t	*xp = ha->xioctl;
1718 
1719 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1720 
1721 	rval = ddi_copyin((void*)(uintptr_t)(cmd->RequestAdr), &reg_struct,
1722 	    cmd->RequestLen, mode);
1723 
1724 	if (rval == 0) {
1725 		if (reg_struct.Enable) {
1726 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1727 		} else {
1728 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1729 			/* Empty the queue. */
1730 			INTR_LOCK(ha);
1731 			xp->aen_q_head = 0;
1732 			xp->aen_q_tail = 0;
1733 			INTR_UNLOCK(ha);
1734 		}
1735 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1736 	} else {
1737 		cmd->Status = EXT_STATUS_COPY_ERR;
1738 		EL(ha, "failed, ddi_copyin\n");
1739 	}
1740 }
1741 
1742 /*
1743  * ql_aen_get
1744  *	IOCTL management server Asynchronous Event Record Transfer.
1745  *
1746  * Input:
1747  *	ha:	adapter state pointer.
1748  *	cmd:	EXT_IOCTL cmd struct pointer.
1749  *	mode:	flags.
1750  *
1751  * Returns:
1752  *	None, request status indicated in cmd->Status.
1753  *
1754  * Context:
1755  *	Kernel context.
1756  */
1757 static void
1758 ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1759 {
1760 	uint32_t	out_size;
1761 	EXT_ASYNC_EVENT	*tmp_q;
1762 	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
1763 	uint8_t		i;
1764 	uint8_t		queue_cnt;
1765 	uint8_t		request_cnt;
1766 	ql_xioctl_t	*xp = ha->xioctl;
1767 
1768 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1769 
1770 	/* Compute the number of events that can be returned */
1771 	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));
1772 
1773 	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
1774 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1775 		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
1776 		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
1777 		    "Len=%xh\n", request_cnt);
1778 		cmd->ResponseLen = 0;
1779 		return;
1780 	}
1781 
1782 	/* 1st: Make a local copy of the entire queue content. */
1783 	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1784 	queue_cnt = 0;
1785 
1786 	INTR_LOCK(ha);
1787 	i = xp->aen_q_head;
1788 
1789 	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
1790 		if (tmp_q[i].AsyncEventCode != 0) {
1791 			bcopy(&tmp_q[i], &aen[queue_cnt],
1792 			    sizeof (EXT_ASYNC_EVENT));
1793 			queue_cnt++;
1794 			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
1795 		}
1796 		if (i == xp->aen_q_tail) {
1797 			/* done. */
1798 			break;
1799 		}
1800 		i++;
1801 		if (i == EXT_DEF_MAX_AEN_QUEUE) {
1802 			i = 0;
1803 		}
1804 	}
1805 
1806 	/* Empty the queue. */
1807 	xp->aen_q_head = 0;
1808 	xp->aen_q_tail = 0;
1809 
1810 	INTR_UNLOCK(ha);
1811 
1812 	/* 2nd: Now transfer the queue content to user buffer */
1813 	/* Copy the entire queue to user's buffer. */
1814 	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
1815 	if (queue_cnt == 0) {
1816 		cmd->ResponseLen = 0;
1817 	} else if (ddi_copyout((void *)&aen[0],
1818 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1819 	    out_size, mode) != 0) {
1820 		cmd->Status = EXT_STATUS_COPY_ERR;
1821 		cmd->ResponseLen = 0;
1822 		EL(ha, "failed, ddi_copyout\n");
1823 	} else {
1824 		cmd->ResponseLen = out_size;
1825 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1826 	}
1827 }
1828 
1829 /*
1830  * ql_enqueue_aen
1831  *
1832  * Input:
1833  *	ha:		adapter state pointer.
1834  *	event_code:	async event code of the event to add to queue.
1835  *	payload:	event payload for the queue.
1836  *	INTR_LOCK must be already obtained.
1837  *
1838  * Context:
1839  *	Interrupt or Kernel context, no mailbox commands allowed.
1840  */
1841 void
1842 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1843 {
1844 	uint8_t			new_entry;	/* index to current entry */
1845 	uint16_t		*mbx;
1846 	EXT_ASYNC_EVENT		*aen_queue;
1847 	ql_xioctl_t		*xp = ha->xioctl;
1848 
1849 	QL_PRINT_9(CE_CONT, "(%d): entered, event_code=%d\n", ha->instance,
1850 	    event_code);
1851 
1852 	if (xp == NULL) {
1853 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1854 		return;
1855 	}
1856 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1857 
1858 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
1859 		/* Need to change queue pointers to make room. */
1860 
1861 		/* Increment tail for adding new entry. */
1862 		xp->aen_q_tail++;
1863 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1864 			xp->aen_q_tail = 0;
1865 		}
1866 		if (xp->aen_q_head == xp->aen_q_tail) {
1867 			/*
1868 			 * We're overwriting the oldest entry, so need to
1869 			 * update the head pointer.
1870 			 */
1871 			xp->aen_q_head++;
1872 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1873 				xp->aen_q_head = 0;
1874 			}
1875 		}
1876 	}
1877 
1878 	new_entry = xp->aen_q_tail;
1879 	aen_queue[new_entry].AsyncEventCode = event_code;
1880 
1881 	/* Update payload */
1882 	if (payload != NULL) {
1883 		switch (event_code) {
1884 		case MBA_LIP_OCCURRED:
1885 		case MBA_LOOP_UP:
1886 		case MBA_LOOP_DOWN:
1887 		case MBA_LIP_F8:
1888 		case MBA_LIP_RESET:
1889 		case MBA_PORT_UPDATE:
1890 			break;
1891 		case MBA_RSCN_UPDATE:
1892 			mbx = (uint16_t *)payload;
1893 			/* al_pa */
1894 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1895 			    LSB(mbx[2]);
1896 			/* area */
1897 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1898 			    MSB(mbx[2]);
1899 			/* domain */
1900 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1901 			    LSB(mbx[1]);
1902 			/* save in big endian */
1903 			BIG_ENDIAN_24(&aen_queue[new_entry].
1904 			    Payload.RSCN.RSCNInfo[0]);
1905 
1906 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1907 			    MSB(mbx[1]);
1908 
1909 			break;
1910 		default:
1911 			/* Not supported */
1912 			EL(ha, "failed, event code not supported=%xh\n",
1913 			    event_code);
1914 			aen_queue[new_entry].AsyncEventCode = 0;
1915 			break;
1916 		}
1917 	}
1918 
1919 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
1920 }
1921 
1922 /*
1923  * ql_scsi_passthru
1924  *	IOCTL SCSI passthrough.
1925  *
1926  * Input:
1927  *	ha:	adapter state pointer.
1928  *	cmd:	User space SCSI command pointer.
1929  *	mode:	flags.
1930  *
1931  * Returns:
1932  *	None, request status indicated in cmd->Status.
1933  *
1934  * Context:
1935  *	Kernel context.
1936  */
1937 static void
1938 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1939 {
1940 	ql_mbx_iocb_t		*pkt;
1941 	ql_mbx_data_t		mr;
1942 	dma_mem_t		*dma_mem;
1943 	caddr_t			pld;
1944 	uint32_t		pkt_size, pld_size;
1945 	uint16_t		qlnt, retries, cnt, cnt2;
1946 	uint8_t			*name;
1947 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1948 	EXT_SCSI_PASSTHRU	*usp_req;
1949 	int			rval;
1950 	union _passthru {
1951 		EXT_SCSI_PASSTHRU	sp_cmd;
1952 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
1953 	} pt_req;		/* Passthru request */
1954 	uint32_t		status, sense_sz = 0;
1955 	ql_tgt_t		*tq = NULL;
1956 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
1957 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
1958 
1959 	/* SCSI request struct for SCSI passthrough IOs. */
1960 	struct {
1961 		uint16_t	lun;
1962 		uint16_t	sense_length;	/* Sense buffer size */
1963 		size_t		resid;		/* Residual */
1964 		uint8_t		*cdbp;		/* Requestor's CDB */
1965 		uint8_t		*u_sense;	/* Requestor's sense buffer */
1966 		uint8_t		cdb_len;	/* Requestor's CDB length */
1967 		uint8_t		direction;
1968 	} scsi_req;
1969 
1970 	struct {
1971 		uint8_t		*rsp_info;
1972 		uint8_t		*req_sense_data;
1973 		uint32_t	residual_length;
1974 		uint32_t	rsp_info_length;
1975 		uint32_t	req_sense_length;
1976 		uint16_t	comp_status;
1977 		uint8_t		state_flags_l;
1978 		uint8_t		state_flags_h;
1979 		uint8_t		scsi_status_l;
1980 		uint8_t		scsi_status_h;
1981 	} sts;
1982 
1983 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
1984 
1985 	/* Verify Sub Code and set cnt to needed request size. */
1986 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
1987 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
1988 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
1989 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
1990 	} else {
1991 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
1992 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
1993 		cmd->ResponseLen = 0;
1994 		return;
1995 	}
1996 
1997 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1998 	if (dma_mem == NULL) {
1999 		EL(ha, "failed, kmem_zalloc\n");
2000 		cmd->Status = EXT_STATUS_NO_MEMORY;
2001 		cmd->ResponseLen = 0;
2002 		return;
2003 	}
2004 	/*  Verify the size of and copy in the passthru request structure. */
2005 	if (cmd->RequestLen != pld_size) {
2006 		/* Return error */
2007 		EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2008 		    cmd->RequestLen, pld_size);
2009 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2010 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2011 		cmd->ResponseLen = 0;
2012 		return;
2013 	}
2014 
2015 	if (ddi_copyin((const void *)(uintptr_t)(uintptr_t)cmd->RequestAdr,
2016 	    &pt_req, pld_size, mode) != 0) {
2017 		EL(ha, "failed, ddi_copyin\n");
2018 		cmd->Status = EXT_STATUS_COPY_ERR;
2019 		cmd->ResponseLen = 0;
2020 		return;
2021 	}
2022 
2023 	/*
2024 	 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2025 	 * request data structure.
2026 	 */
2027 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2028 		scsi_req.lun = sp_req->TargetAddr.Lun;
2029 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2030 		scsi_req.cdbp = &sp_req->Cdb[0];
2031 		scsi_req.cdb_len = sp_req->CdbLength;
2032 		scsi_req.direction = sp_req->Direction;
2033 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2034 		scsi_req.u_sense = &usp_req->SenseData[0];
2035 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2036 
2037 		qlnt = QLNT_PORT;
2038 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2039 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2040 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2041 		tq = ql_find_port(ha, name, qlnt);
2042 	} else {
2043 		/*
2044 		 * Must be FC PASSTHRU, verified above.
2045 		 */
2046 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2047 			qlnt = QLNT_PORT;
2048 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2049 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2050 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2051 			    ha->instance, cmd->SubCode, name[0], name[1],
2052 			    name[2], name[3], name[4], name[5], name[6],
2053 			    name[7]);
2054 			tq = ql_find_port(ha, name, qlnt);
2055 		} else if (fc_req->FCScsiAddr.DestType ==
2056 		    EXT_DEF_DESTTYPE_WWNN) {
2057 			qlnt = QLNT_NODE;
2058 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2059 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2060 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2061 			    ha->instance, cmd->SubCode, name[0], name[1],
2062 			    name[2], name[3], name[4], name[5], name[6],
2063 			    name[7]);
2064 			tq = ql_find_port(ha, name, qlnt);
2065 		} else if (fc_req->FCScsiAddr.DestType ==
2066 		    EXT_DEF_DESTTYPE_PORTID) {
2067 			qlnt = QLNT_PID;
2068 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2069 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2070 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2071 			    name[0], name[1], name[2]);
2072 			tq = ql_find_port(ha, name, qlnt);
2073 		} else {
2074 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2075 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2076 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2077 			cmd->ResponseLen = 0;
2078 			return;
2079 		}
2080 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2081 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2082 		scsi_req.cdbp = &sp_req->Cdb[0];
2083 		scsi_req.cdb_len = sp_req->CdbLength;
2084 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2085 		scsi_req.u_sense = &ufc_req->SenseData[0];
2086 		scsi_req.direction = fc_req->Direction;
2087 	}
2088 
2089 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2090 		EL(ha, "failed, fc_port not found\n");
2091 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2092 		cmd->ResponseLen = 0;
2093 		return;
2094 	}
2095 
2096 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2097 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2098 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2099 		cmd->ResponseLen = 0;
2100 		return;
2101 	}
2102 
2103 	/* Allocate command block. */
2104 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2105 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2106 	    cmd->ResponseLen) {
2107 		pld_size = cmd->ResponseLen;
2108 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2109 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2110 		if (pkt == NULL) {
2111 			EL(ha, "failed, kmem_zalloc\n");
2112 			cmd->Status = EXT_STATUS_NO_MEMORY;
2113 			cmd->ResponseLen = 0;
2114 			return;
2115 		}
2116 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2117 
2118 		/* Get DMA memory for the IOCB */
2119 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2120 		    MEM_DATA_ALIGN) != QL_SUCCESS) {
2121 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2122 			    "alloc failed", QL_NAME, ha->instance);
2123 			kmem_free(pkt, pkt_size);
2124 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2125 			cmd->ResponseLen = 0;
2126 			return;
2127 		}
2128 
2129 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2130 			scsi_req.direction = (uint8_t)
2131 			    (CFG_IST(ha, CFG_CTRL_2425) ?
2132 			    CF_RD : CF_DATA_IN | CF_STAG);
2133 		} else {
2134 			scsi_req.direction = (uint8_t)
2135 			    (CFG_IST(ha, CFG_CTRL_2425) ?
2136 			    CF_WR : CF_DATA_OUT | CF_STAG);
2137 			cmd->ResponseLen = 0;
2138 
2139 			/* Get command payload. */
2140 			if (ql_get_buffer_data(
2141 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2142 			    pld, pld_size, mode) != pld_size) {
2143 				EL(ha, "failed, get_buffer_data\n");
2144 				cmd->Status = EXT_STATUS_COPY_ERR;
2145 
2146 				kmem_free(pkt, pkt_size);
2147 				ql_free_dma_resource(ha, dma_mem);
2148 				kmem_free(dma_mem, sizeof (dma_mem_t));
2149 				return;
2150 			}
2151 
2152 			/* Copy out going data to DMA buffer. */
2153 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2154 			    (uint8_t *)dma_mem->bp, pld_size,
2155 			    DDI_DEV_AUTOINCR);
2156 
2157 			/* Sync DMA buffer. */
2158 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2159 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2160 		}
2161 	} else {
2162 		scsi_req.direction = (uint8_t)
2163 		    (CFG_IST(ha, CFG_CTRL_2425) ? 0 : CF_STAG);
2164 		cmd->ResponseLen = 0;
2165 
2166 		pkt_size = sizeof (ql_mbx_iocb_t);
2167 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2168 		if (pkt == NULL) {
2169 			EL(ha, "failed, kmem_zalloc-2\n");
2170 			cmd->Status = EXT_STATUS_NO_MEMORY;
2171 			return;
2172 		}
2173 		pld = NULL;
2174 		pld_size = 0;
2175 	}
2176 
2177 	/* retries = ha->port_down_retry_count; */
2178 	retries = 1;
2179 	cmd->Status = EXT_STATUS_OK;
2180 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2181 
2182 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2183 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2184 
2185 	do {
2186 		if (DRIVER_SUSPENDED(ha)) {
2187 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2188 			break;
2189 		}
2190 
2191 		if (CFG_IST(ha, CFG_CTRL_2425)) {
2192 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2193 			pkt->cmd24.entry_count = 1;
2194 
2195 			/* Set LUN number */
2196 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2197 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2198 
2199 			/* Set N_port handle */
2200 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2201 
2202 			/* Set VP Index */
2203 			pkt->cmd24.vp_index = ha->vp_index;
2204 
2205 			/* Set target ID */
2206 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2207 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2208 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2209 
2210 			/* Set ISP command timeout. */
2211 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2212 
2213 			/* Load SCSI CDB */
2214 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2215 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2216 			    DDI_DEV_AUTOINCR);
2217 			for (cnt = 0; cnt < MAX_CMDSZ;
2218 			    cnt = (uint16_t)(cnt + 4)) {
2219 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2220 				    + cnt, 4);
2221 			}
2222 
2223 			/* Set tag queue control flags */
2224 			pkt->cmd24.task = TA_STAG;
2225 
2226 			if (pld_size) {
2227 				/* Set transfer direction. */
2228 				pkt->cmd24.control_flags = scsi_req.direction;
2229 
2230 				/* Set data segment count. */
2231 				pkt->cmd24.dseg_count = LE_16(1);
2232 
2233 				/* Load total byte count. */
2234 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2235 
2236 				/* Load data descriptor. */
2237 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2238 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2239 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2240 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2241 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2242 			}
2243 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2244 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2245 			pkt->cmd3.entry_count = 1;
2246 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2247 				pkt->cmd3.target_l = LSB(tq->loop_id);
2248 				pkt->cmd3.target_h = MSB(tq->loop_id);
2249 			} else {
2250 				pkt->cmd3.target_h = LSB(tq->loop_id);
2251 			}
2252 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2253 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2254 			pkt->cmd3.control_flags_l = scsi_req.direction;
2255 			pkt->cmd3.timeout = LE_16(15);
2256 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2257 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2258 			}
2259 			if (pld_size) {
2260 				pkt->cmd3.dseg_count = LE_16(1);
2261 				pkt->cmd3.byte_count = LE_32(pld_size);
2262 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2263 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2264 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2265 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2266 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2267 			}
2268 		} else {
2269 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2270 			pkt->cmd.entry_count = 1;
2271 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2272 				pkt->cmd.target_l = LSB(tq->loop_id);
2273 				pkt->cmd.target_h = MSB(tq->loop_id);
2274 			} else {
2275 				pkt->cmd.target_h = LSB(tq->loop_id);
2276 			}
2277 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2278 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2279 			pkt->cmd.control_flags_l = scsi_req.direction;
2280 			pkt->cmd.timeout = LE_16(15);
2281 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2282 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2283 			}
2284 			if (pld_size) {
2285 				pkt->cmd.dseg_count = LE_16(1);
2286 				pkt->cmd.byte_count = LE_32(pld_size);
2287 				pkt->cmd.dseg_0_address = (uint32_t)
2288 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2289 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2290 			}
2291 		}
2292 		/* Go issue command and wait for completion. */
2293 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2294 		QL_DUMP_9(pkt, 8, pkt_size);
2295 
2296 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2297 
2298 		if (pld_size) {
2299 			/* Sync in coming DMA buffer. */
2300 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2301 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2302 			/* Copy in coming DMA data. */
2303 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2304 			    (uint8_t *)dma_mem->bp, pld_size,
2305 			    DDI_DEV_AUTOINCR);
2306 		}
2307 
2308 		if (CFG_IST(ha, CFG_CTRL_2425)) {
2309 			pkt->sts24.entry_status = (uint8_t)
2310 			    (pkt->sts24.entry_status & 0x3c);
2311 		} else {
2312 			pkt->sts.entry_status = (uint8_t)
2313 			    (pkt->sts.entry_status & 0x7e);
2314 		}
2315 
2316 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2317 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2318 			    pkt->sts.entry_status, tq->d_id.b24);
2319 			status = QL_FUNCTION_PARAMETER_ERROR;
2320 		}
2321 
2322 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
2323 		    LE_16(pkt->sts24.comp_status) :
2324 		    LE_16(pkt->sts.comp_status));
2325 
2326 		/*
2327 		 * We have verified about all the request that can be so far.
2328 		 * Now we need to start verification of our ability to
2329 		 * actually issue the CDB.
2330 		 */
2331 		if (DRIVER_SUSPENDED(ha)) {
2332 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2333 			break;
2334 		} else if (status == QL_SUCCESS &&
2335 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2336 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2337 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2338 			if (tq->flags & TQF_FABRIC_DEVICE) {
2339 				rval = ql_login_fport(ha, tq, tq->loop_id,
2340 				    LFF_NO_PLOGI, &mr);
2341 				if (rval != QL_SUCCESS) {
2342 					EL(ha, "failed, login_fport=%xh, "
2343 					    "d_id=%xh\n", rval, tq->d_id.b24);
2344 				}
2345 			} else {
2346 				rval = ql_login_lport(ha, tq, tq->loop_id,
2347 				    LLF_NONE);
2348 				if (rval != QL_SUCCESS) {
2349 					EL(ha, "failed, login_lport=%xh, "
2350 					    "d_id=%xh\n", rval, tq->d_id.b24);
2351 				}
2352 			}
2353 		} else {
2354 			break;
2355 		}
2356 
2357 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2358 
2359 	} while (retries--);
2360 
2361 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2362 		/* Cannot issue command now, maybe later */
2363 		EL(ha, "failed, suspended\n");
2364 		kmem_free(pkt, pkt_size);
2365 		ql_free_dma_resource(ha, dma_mem);
2366 		kmem_free(dma_mem, sizeof (dma_mem_t));
2367 		cmd->Status = EXT_STATUS_SUSPENDED;
2368 		cmd->ResponseLen = 0;
2369 		return;
2370 	}
2371 
2372 	if (status != QL_SUCCESS) {
2373 		/* Command error */
2374 		EL(ha, "failed, I/O\n");
2375 		kmem_free(pkt, pkt_size);
2376 		ql_free_dma_resource(ha, dma_mem);
2377 		kmem_free(dma_mem, sizeof (dma_mem_t));
2378 		cmd->Status = EXT_STATUS_ERR;
2379 		cmd->DetailStatus = status;
2380 		cmd->ResponseLen = 0;
2381 		return;
2382 	}
2383 
2384 	/* Setup status. */
2385 	if (CFG_IST(ha, CFG_CTRL_2425)) {
2386 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2387 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2388 
2389 		/* Setup residuals. */
2390 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2391 
2392 		/* Setup state flags. */
2393 		sts.state_flags_l = pkt->sts24.state_flags_l;
2394 		sts.state_flags_h = pkt->sts24.state_flags_h;
2395 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2396 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2397 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2398 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2399 		} else {
2400 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2401 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2402 			    SF_GOT_STATUS);
2403 		}
2404 		if (scsi_req.direction & CF_WR) {
2405 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2406 			    SF_DATA_OUT);
2407 		} else if (scsi_req.direction & CF_RD) {
2408 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2409 			    SF_DATA_IN);
2410 		}
2411 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2412 
2413 		/* Setup FCP response info. */
2414 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2415 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2416 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2417 		for (cnt = 0; cnt < sts.rsp_info_length;
2418 		    cnt = (uint16_t)(cnt + 4)) {
2419 			ql_chg_endian(sts.rsp_info + cnt, 4);
2420 		}
2421 
2422 		/* Setup sense data. */
2423 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2424 			sts.req_sense_length =
2425 			    LE_32(pkt->sts24.fcp_sense_length);
2426 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2427 			    SF_ARQ_DONE);
2428 		} else {
2429 			sts.req_sense_length = 0;
2430 		}
2431 		sts.req_sense_data =
2432 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2433 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2434 		    (uintptr_t)sts.req_sense_data);
2435 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2436 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2437 		}
2438 	} else {
2439 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2440 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2441 
2442 		/* Setup residuals. */
2443 		sts.residual_length = LE_32(pkt->sts.residual_length);
2444 
2445 		/* Setup state flags. */
2446 		sts.state_flags_l = pkt->sts.state_flags_l;
2447 		sts.state_flags_h = pkt->sts.state_flags_h;
2448 
2449 		/* Setup FCP response info. */
2450 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2451 		    LE_16(pkt->sts.rsp_info_length) : 0;
2452 		sts.rsp_info = &pkt->sts.rsp_info[0];
2453 
2454 		/* Setup sense data. */
2455 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2456 		    LE_16(pkt->sts.req_sense_length) : 0;
2457 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2458 	}
2459 
2460 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2461 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2462 
2463 	switch (sts.comp_status) {
2464 	case CS_INCOMPLETE:
2465 	case CS_ABORTED:
2466 	case CS_DEVICE_UNAVAILABLE:
2467 	case CS_PORT_UNAVAILABLE:
2468 	case CS_PORT_LOGGED_OUT:
2469 	case CS_PORT_CONFIG_CHG:
2470 	case CS_PORT_BUSY:
2471 	case CS_LOOP_DOWN_ABORT:
2472 		cmd->Status = EXT_STATUS_BUSY;
2473 		break;
2474 	case CS_RESET:
2475 	case CS_QUEUE_FULL:
2476 		cmd->Status = EXT_STATUS_ERR;
2477 		break;
2478 	case CS_TIMEOUT:
2479 		cmd->Status = EXT_STATUS_ERR;
2480 		break;
2481 	case CS_DATA_OVERRUN:
2482 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2483 		break;
2484 	case CS_DATA_UNDERRUN:
2485 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2486 		break;
2487 	}
2488 
2489 	/*
2490 	 * If non data transfer commands fix tranfer counts.
2491 	 */
2492 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2493 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2494 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2495 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2496 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2497 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2498 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2499 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2500 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2501 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2502 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2503 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2504 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2505 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2506 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2507 		/*
2508 		 * Non data transfer command, clear sts_entry residual
2509 		 * length.
2510 		 */
2511 		sts.residual_length = 0;
2512 		cmd->ResponseLen = 0;
2513 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2514 			sts.comp_status = CS_COMPLETE;
2515 			cmd->Status = EXT_STATUS_OK;
2516 		}
2517 	} else {
2518 		cmd->ResponseLen = pld_size;
2519 	}
2520 
2521 	/* Correct ISP completion status */
2522 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2523 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2524 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2525 		    ha->instance);
2526 		scsi_req.resid = 0;
2527 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2528 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2529 		    ha->instance);
2530 		scsi_req.resid = sts.residual_length;
2531 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2532 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2533 
2534 			cmd->ResponseLen = (uint32_t)
2535 			    (pld_size - scsi_req.resid);
2536 		} else {
2537 			EL(ha, "failed, Transfer ERROR\n");
2538 			cmd->Status = EXT_STATUS_ERR;
2539 			cmd->ResponseLen = 0;
2540 		}
2541 	} else {
2542 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2543 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2544 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2545 		    sts.scsi_status_l);
2546 
2547 		scsi_req.resid = pld_size;
2548 		/*
2549 		 * Handle residual count on SCSI check
2550 		 * condition.
2551 		 *
2552 		 * - If Residual Under / Over is set, use the
2553 		 *   Residual Transfer Length field in IOCB.
2554 		 * - If Residual Under / Over is not set, and
2555 		 *   Transferred Data bit is set in State Flags
2556 		 *   field of IOCB, report residual value of 0
2557 		 *   (you may want to do this for tape
2558 		 *   Write-type commands only). This takes care
2559 		 *   of logical end of tape problem and does
2560 		 *   not break Unit Attention.
2561 		 * - If Residual Under / Over is not set, and
2562 		 *   Transferred Data bit is not set in State
2563 		 *   Flags, report residual value equal to
2564 		 *   original data transfer length.
2565 		 */
2566 		if (sts.scsi_status_l & STATUS_CHECK) {
2567 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2568 			cmd->DetailStatus = sts.scsi_status_l;
2569 			if (sts.scsi_status_h &
2570 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2571 				scsi_req.resid = sts.residual_length;
2572 			} else if (sts.state_flags_h &
2573 			    STATE_XFERRED_DATA) {
2574 				scsi_req.resid = 0;
2575 			}
2576 		}
2577 	}
2578 
2579 	if (sts.scsi_status_l & STATUS_CHECK &&
2580 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2581 	    sts.req_sense_length) {
2582 		/*
2583 		 * Check condition with vaild sense data flag set and sense
2584 		 * length != 0
2585 		 */
2586 		if (sts.req_sense_length > scsi_req.sense_length) {
2587 			sense_sz = scsi_req.sense_length;
2588 		} else {
2589 			sense_sz = sts.req_sense_length;
2590 		}
2591 
2592 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2593 		    tq->d_id.b24);
2594 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2595 
2596 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2597 		    (size_t)sense_sz, mode) != 0) {
2598 			EL(ha, "failed, request sense ddi_copyout\n");
2599 		}
2600 
2601 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2602 		cmd->DetailStatus = sts.scsi_status_l;
2603 	}
2604 
2605 	/* Copy response payload from DMA buffer to application. */
2606 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2607 	    cmd->ResponseLen != 0) {
2608 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2609 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2610 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2611 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2612 
2613 		/* Send response payload. */
2614 		if (ql_send_buffer_data(pld,
2615 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2616 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2617 			EL(ha, "failed, send_buffer_data\n");
2618 			cmd->Status = EXT_STATUS_COPY_ERR;
2619 			cmd->ResponseLen = 0;
2620 		}
2621 	}
2622 
2623 	if (cmd->Status != EXT_STATUS_OK) {
2624 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2625 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2626 	} else {
2627 		/*EMPTY*/
2628 		QL_PRINT_9(CE_CONT, "(%d): exiting, ResponseLen=%d\n",
2629 		    ha->instance, cmd->ResponseLen);
2630 	}
2631 
2632 	kmem_free(pkt, pkt_size);
2633 	ql_free_dma_resource(ha, dma_mem);
2634 	kmem_free(dma_mem, sizeof (dma_mem_t));
2635 }
2636 
2637 /*
2638  * ql_wwpn_to_scsiaddr
2639  *
2640  * Input:
2641  *	ha:	adapter state pointer.
2642  *	cmd:	EXT_IOCTL cmd struct pointer.
2643  *	mode:	flags.
2644  *
2645  * Context:
2646  *	Kernel context.
2647  */
2648 static void
2649 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2650 {
2651 	int		status;
2652 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2653 	EXT_SCSI_ADDR	*tmp_addr;
2654 	ql_tgt_t	*tq;
2655 
2656 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2657 
2658 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2659 		/* Return error */
2660 		EL(ha, "incorrect RequestLen\n");
2661 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2662 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2663 		return;
2664 	}
2665 
2666 	status = ddi_copyin((void*)(uintptr_t)(cmd->RequestAdr), wwpn,
2667 	    cmd->RequestLen, mode);
2668 
2669 	if (status != 0) {
2670 		cmd->Status = EXT_STATUS_COPY_ERR;
2671 		EL(ha, "failed, ddi_copyin\n");
2672 		return;
2673 	}
2674 
2675 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2676 
2677 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2678 		/* no matching device */
2679 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2680 		EL(ha, "failed, device not found\n");
2681 		return;
2682 	}
2683 
2684 	/* Copy out the IDs found.  For now we can only return target ID. */
2685 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2686 
2687 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2688 
2689 	if (status != 0) {
2690 		cmd->Status = EXT_STATUS_COPY_ERR;
2691 		EL(ha, "failed, ddi_copyout\n");
2692 	} else {
2693 		cmd->Status = EXT_STATUS_OK;
2694 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2695 	}
2696 }
2697 
2698 /*
2699  * ql_host_idx
2700  *	Gets host order index.
2701  *
2702  * Input:
2703  *	ha:	adapter state pointer.
2704  *	cmd:	EXT_IOCTL cmd struct pointer.
2705  *	mode:	flags.
2706  *
2707  * Returns:
2708  *	None, request status indicated in cmd->Status.
2709  *
2710  * Context:
2711  *	Kernel context.
2712  */
2713 static void
2714 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2715 {
2716 	uint16_t	idx;
2717 
2718 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2719 
2720 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2721 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2722 		cmd->DetailStatus = sizeof (uint16_t);
2723 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2724 		cmd->ResponseLen = 0;
2725 		return;
2726 	}
2727 
2728 	idx = (uint16_t)ha->instance;
2729 
2730 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2731 	    sizeof (uint16_t), mode) != 0) {
2732 		cmd->Status = EXT_STATUS_COPY_ERR;
2733 		cmd->ResponseLen = 0;
2734 		EL(ha, "failed, ddi_copyout\n");
2735 	} else {
2736 		cmd->ResponseLen = sizeof (uint16_t);
2737 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2738 	}
2739 }
2740 
2741 /*
2742  * ql_host_drvname
2743  *	Gets host driver name
2744  *
2745  * Input:
2746  *	ha:	adapter state pointer.
2747  *	cmd:	EXT_IOCTL cmd struct pointer.
2748  *	mode:	flags.
2749  *
2750  * Returns:
2751  *	None, request status indicated in cmd->Status.
2752  *
2753  * Context:
2754  *	Kernel context.
2755  */
2756 static void
2757 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2758 {
2759 
2760 	char		drvname[] = QL_NAME;
2761 	uint32_t	qlnamelen;
2762 
2763 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2764 
2765 	qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2766 
2767 	if (cmd->ResponseLen < qlnamelen) {
2768 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2769 		cmd->DetailStatus = qlnamelen;
2770 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2771 		    cmd->ResponseLen, qlnamelen);
2772 		cmd->ResponseLen = 0;
2773 		return;
2774 	}
2775 
2776 	if (ddi_copyout((void *)&drvname, (void *)(uintptr_t)(cmd->ResponseAdr),
2777 	    qlnamelen, mode) != 0) {
2778 		cmd->Status = EXT_STATUS_COPY_ERR;
2779 		cmd->ResponseLen = 0;
2780 		EL(ha, "failed, ddi_copyout\n");
2781 	} else {
2782 		cmd->ResponseLen = qlnamelen-1;
2783 	}
2784 
2785 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2786 }
2787 
2788 /*
2789  * ql_read_nvram
2790  *	Get NVRAM contents.
2791  *
2792  * Input:
2793  *	ha:	adapter state pointer.
2794  *	cmd:	EXT_IOCTL cmd struct pointer.
2795  *	mode:	flags.
2796  *
2797  * Returns:
2798  *	None, request status indicated in cmd->Status.
2799  *
2800  * Context:
2801  *	Kernel context.
2802  */
2803 static void
2804 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2805 {
2806 	uint32_t	nv_size;
2807 
2808 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2809 
2810 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_2425) ?
2811 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2812 	if (cmd->ResponseLen < nv_size) {
2813 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2814 		cmd->DetailStatus = nv_size;
2815 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2816 		    cmd->ResponseLen);
2817 		cmd->ResponseLen = 0;
2818 		return;
2819 	}
2820 
2821 	/* Get NVRAM data. */
2822 	if (ql_nv_util_dump(ha,
2823 	    (void *)(uintptr_t)(cmd->ResponseAdr), mode) != 0) {
2824 		cmd->Status = EXT_STATUS_COPY_ERR;
2825 		cmd->ResponseLen = 0;
2826 		EL(ha, "failed, copy error\n");
2827 	} else {
2828 		cmd->ResponseLen = nv_size;
2829 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2830 	}
2831 }
2832 
2833 /*
2834  * ql_write_nvram
2835  *	Loads NVRAM contents.
2836  *
2837  * Input:
2838  *	ha:	adapter state pointer.
2839  *	cmd:	EXT_IOCTL cmd struct pointer.
2840  *	mode:	flags.
2841  *
2842  * Returns:
2843  *	None, request status indicated in cmd->Status.
2844  *
2845  * Context:
2846  *	Kernel context.
2847  */
2848 static void
2849 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2850 {
2851 	uint32_t	nv_size;
2852 
2853 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2854 
2855 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_2425) ?
2856 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2857 	if (cmd->RequestLen < nv_size) {
2858 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2859 		cmd->DetailStatus = sizeof (nvram_t);
2860 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2861 		    cmd->RequestLen);
2862 		return;
2863 	}
2864 
2865 	/* Load NVRAM data. */
2866 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2867 	    mode) != 0) {
2868 		cmd->Status = EXT_STATUS_COPY_ERR;
2869 		EL(ha, "failed, copy error\n");
2870 	} else {
2871 		/*EMPTY*/
2872 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2873 	}
2874 }
2875 
2876 /*
2877  * ql_write_vpd
2878  *	Loads VPD contents.
2879  *
2880  * Input:
2881  *	ha:	adapter state pointer.
2882  *	cmd:	EXT_IOCTL cmd struct pointer.
2883  *	mode:	flags.
2884  *
2885  * Returns:
2886  *	None, request status indicated in cmd->Status.
2887  *
2888  * Context:
2889  *	Kernel context.
2890  */
2891 static void
2892 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2893 {
2894 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2895 
2896 	int32_t		rval = 0;
2897 
2898 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
2899 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2900 		EL(ha, "failed, invalid request for HBA\n");
2901 		return;
2902 	}
2903 
2904 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2905 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2906 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2907 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2908 		    cmd->RequestLen);
2909 		return;
2910 	}
2911 
2912 	/* Load VPD data. */
2913 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2914 	    mode)) != 0) {
2915 		cmd->Status = EXT_STATUS_COPY_ERR;
2916 		cmd->DetailStatus = rval;
2917 		EL(ha, "failed, errno=%x\n", rval);
2918 	} else {
2919 		/*EMPTY*/
2920 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2921 	}
2922 }
2923 
2924 /*
2925  * ql_read_vpd
2926  *	Dumps VPD contents.
2927  *
2928  * Input:
2929  *	ha:	adapter state pointer.
2930  *	cmd:	EXT_IOCTL cmd struct pointer.
2931  *	mode:	flags.
2932  *
2933  * Returns:
2934  *	None, request status indicated in cmd->Status.
2935  *
2936  * Context:
2937  *	Kernel context.
2938  */
2939 static void
2940 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2941 {
2942 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
2943 
2944 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
2945 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2946 		EL(ha, "failed, invalid request for HBA\n");
2947 		return;
2948 	}
2949 
2950 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2951 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2952 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2953 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2954 		    cmd->ResponseLen);
2955 		return;
2956 	}
2957 
2958 	/* Dump VPD data. */
2959 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2960 	    mode)) != 0) {
2961 		cmd->Status = EXT_STATUS_COPY_ERR;
2962 		EL(ha, "failed,\n");
2963 	} else {
2964 		/*EMPTY*/
2965 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
2966 	}
2967 }
2968 
2969 /*
2970  * ql_get_fcache
2971  *	Dumps flash cache contents.
2972  *
2973  * Input:
2974  *	ha:	adapter state pointer.
2975  *	cmd:	EXT_IOCTL cmd struct pointer.
2976  *	mode:	flags.
2977  *
2978  * Returns:
2979  *	None, request status indicated in cmd->Status.
2980  *
2981  * Context:
2982  *	Kernel context.
2983  */
static void
ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	bsize, boff, types, cpsize, hsize;
	ql_fcache_t	*fptr;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Cache list is walked under the cache lock for the whole call. */
	CACHE_LOCK(ha);

	if (ha->fcache == NULL) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_ERR;
		EL(ha, "failed, adapter fcache not setup\n");
		return;
	}

	/*
	 * Minimum response buffer the caller must provide.  Images are
	 * laid out in fixed 100-byte slots below (boff advances by 100),
	 * so 24xx/25xx adapters need room for 4 slots (BIOS/FCODE/EFI
	 * plus firmware at offset 300).
	 * NOTE(review): pre-24xx only requires 100 bytes here, yet the
	 * firmware copy below still targets offset 300 — presumably
	 * ql_get_fbuf() cannot return FTYPE_FW images on those
	 * adapters; confirm.
	 */
	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
		bsize = 100;
	} else {
		bsize = 400;
	}

	if (cmd->ResponseLen < bsize) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = bsize;
		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
		    bsize, cmd->ResponseLen);
		return;
	}

	/* From here on, bsize is reused as the running byte count copied. */
	boff = 0;
	bsize = 0;
	fptr = ha->fcache;

	/*
	 * For backwards compatibility, get one of each image type
	 */
	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
		/* Get the next image */
		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {

			/* Each slot holds at most 100 bytes of the image. */
			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);

			if (ddi_copyout(fptr->buf,
			    (void *)(uintptr_t)(cmd->ResponseAdr +
			    boff), cpsize, mode) != 0) {
				CACHE_UNLOCK(ha);
				EL(ha, "ddicopy failed, exiting\n");
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->DetailStatus = 0;
				return;
			}
			/* Advance a full slot even on a short image. */
			boff += 100;
			bsize += cpsize;
			/* Clearing the type found guarantees termination. */
			types &= ~(fptr->type);
		}
	}

	/*
	 * Get the firmware image -- it needs to be last in the
	 * buffer at offset 300 for backwards compatibility. Also for
	 * backwards compatibility, the pci header is stripped off.
	 */
	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {

		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
		if (hsize > fptr->buflen) {
			CACHE_UNLOCK(ha);
			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
			    hsize, fptr->buflen);
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}

		/* Copy up to 100 bytes of firmware past the PCI header. */
		cpsize = ((fptr->buflen - hsize) < 100 ?
		    fptr->buflen - hsize : 100);

		if (ddi_copyout(fptr->buf+hsize,
		    (void *)(uintptr_t)(cmd->ResponseAdr +
		    300), cpsize, mode) != 0) {
			CACHE_UNLOCK(ha);
			EL(ha, "fw ddicopy failed, exiting\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}
		/* Count a full slot (not cpsize) — matches legacy layout. */
		bsize += 100;
	}

	CACHE_UNLOCK(ha);
	cmd->Status = EXT_STATUS_OK;
	/* DetailStatus returns the total bytes accounted for. */
	cmd->DetailStatus = bsize;

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
}
3083 
3084 /*
3085  * ql_get_fcache_ex
3086  *	Dumps flash cache contents.
3087  *
3088  * Input:
3089  *	ha:	adapter state pointer.
3090  *	cmd:	EXT_IOCTL cmd struct pointer.
3091  *	mode:	flags.
3092  *
3093  * Returns:
3094  *	None, request status indicated in cmd->Status.
3095  *
3096  * Context:
3097  *	Kernel context.
3098  */
3099 static void
3100 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3101 {
3102 	uint32_t	bsize = 0;
3103 	uint32_t	boff = 0;
3104 	ql_fcache_t	*fptr;
3105 
3106 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3107 
3108 	CACHE_LOCK(ha);
3109 	if (ha->fcache == NULL) {
3110 		CACHE_UNLOCK(ha);
3111 		cmd->Status = EXT_STATUS_ERR;
3112 		EL(ha, "failed, adapter fcache not setup\n");
3113 		return;
3114 	}
3115 
3116 	/* Make sure user passed enough buffer space */
3117 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3118 		bsize += FBUFSIZE;
3119 	}
3120 
3121 	if (cmd->ResponseLen < bsize) {
3122 		CACHE_UNLOCK(ha);
3123 		if (cmd->ResponseLen != 0) {
3124 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3125 			    bsize, cmd->ResponseLen);
3126 		}
3127 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3128 		cmd->DetailStatus = bsize;
3129 		return;
3130 	}
3131 
3132 	boff = 0;
3133 	fptr = ha->fcache;
3134 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3135 		/* Get the next image */
3136 		if (ddi_copyout(fptr->buf,
3137 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3138 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3139 		    mode) != 0) {
3140 			CACHE_UNLOCK(ha);
3141 			EL(ha, "failed, ddicopy at %xh, exiting\n", boff);
3142 			cmd->Status = EXT_STATUS_COPY_ERR;
3143 			cmd->DetailStatus = 0;
3144 			return;
3145 		}
3146 		boff += FBUFSIZE;
3147 		fptr = fptr->next;
3148 	}
3149 
3150 	CACHE_UNLOCK(ha);
3151 	cmd->Status = EXT_STATUS_OK;
3152 	cmd->DetailStatus = bsize;
3153 
3154 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3155 }
3156 
3157 
3158 /*
3159  * ql_read_flash
3160  *	Get flash contents.
3161  *
3162  * Input:
3163  *	ha:	adapter state pointer.
3164  *	cmd:	EXT_IOCTL cmd struct pointer.
3165  *	mode:	flags.
3166  *
3167  * Returns:
3168  *	None, request status indicated in cmd->Status.
3169  *
3170  * Context:
3171  *	Kernel context.
3172  */
3173 static void
3174 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3175 {
3176 	ql_xioctl_t	*xp = ha->xioctl;
3177 
3178 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3179 
3180 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3181 		EL(ha, "ql_stall_driver failed\n");
3182 		cmd->Status = EXT_STATUS_BUSY;
3183 		cmd->DetailStatus = xp->fdesc.flash_size;
3184 		cmd->ResponseLen = 0;
3185 		return;
3186 	}
3187 
3188 	if (ql_setup_flash(ha) != QL_SUCCESS) {
3189 		cmd->Status = EXT_STATUS_ERR;
3190 		cmd->DetailStatus = xp->fdesc.flash_size;
3191 		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3192 		    cmd->ResponseLen, xp->fdesc.flash_size);
3193 		cmd->ResponseLen = 0;
3194 	} else {
3195 		/* adjust read size to flash size */
3196 		if (cmd->ResponseLen > xp->fdesc.flash_size) {
3197 			EL(ha, "adjusting req=%xh, max=%xh\n",
3198 			    cmd->ResponseLen, xp->fdesc.flash_size);
3199 			cmd->ResponseLen = xp->fdesc.flash_size;
3200 		}
3201 
3202 		/* Get flash data. */
3203 		if (ql_flash_fcode_dump(ha,
3204 		    (void *)(uintptr_t)(cmd->ResponseAdr),
3205 		    (size_t)(cmd->ResponseLen), mode) != 0) {
3206 			cmd->Status = EXT_STATUS_COPY_ERR;
3207 			cmd->ResponseLen = 0;
3208 			EL(ha, "failed,\n");
3209 		}
3210 	}
3211 
3212 	/* Resume I/O */
3213 	if (CFG_IST(ha, CFG_CTRL_2425)) {
3214 		ql_restart_driver(ha);
3215 	} else {
3216 		EL(ha, "isp_abort_needed for restart\n");
3217 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3218 		    DRIVER_STALL);
3219 	}
3220 
3221 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3222 }
3223 
3224 /*
3225  * ql_write_flash
3226  *	Loads flash contents.
3227  *
3228  * Input:
3229  *	ha:	adapter state pointer.
3230  *	cmd:	EXT_IOCTL cmd struct pointer.
3231  *	mode:	flags.
3232  *
3233  * Returns:
3234  *	None, request status indicated in cmd->Status.
3235  *
3236  * Context:
3237  *	Kernel context.
3238  */
3239 static void
3240 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3241 {
3242 	ql_xioctl_t	*xp = ha->xioctl;
3243 
3244 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3245 
3246 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3247 		EL(ha, "ql_stall_driver failed\n");
3248 		cmd->Status = EXT_STATUS_BUSY;
3249 		cmd->DetailStatus = xp->fdesc.flash_size;
3250 		cmd->ResponseLen = 0;
3251 		return;
3252 	}
3253 
3254 	if (ql_setup_flash(ha) != QL_SUCCESS) {
3255 		cmd->Status = EXT_STATUS_ERR;
3256 		cmd->DetailStatus = xp->fdesc.flash_size;
3257 		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
3258 		    cmd->RequestLen, xp->fdesc.flash_size);
3259 		cmd->ResponseLen = 0;
3260 	} else {
3261 		/* Load flash data. */
3262 		if (cmd->RequestLen > xp->fdesc.flash_size) {
3263 			cmd->Status = EXT_STATUS_ERR;
3264 			cmd->DetailStatus =  xp->fdesc.flash_size;
3265 			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
3266 			    cmd->RequestLen, xp->fdesc.flash_size);
3267 		} else if (ql_flash_fcode_load(ha,
3268 		    (void *)(uintptr_t)(cmd->RequestAdr),
3269 		    (size_t)(cmd->RequestLen), mode) != 0) {
3270 			cmd->Status = EXT_STATUS_COPY_ERR;
3271 			EL(ha, "failed,\n");
3272 		}
3273 	}
3274 
3275 	/* Resume I/O */
3276 	if (CFG_IST(ha, CFG_CTRL_2425)) {
3277 		ql_restart_driver(ha);
3278 	} else {
3279 		EL(ha, "isp_abort_needed for restart\n");
3280 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3281 		    DRIVER_STALL);
3282 	}
3283 
3284 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3285 }
3286 
3287 /*
3288  * ql_diagnostic_loopback
3289  *	Performs EXT_CC_LOOPBACK Command
3290  *
3291  * Input:
3292  *	ha:	adapter state pointer.
3293  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3294  *	mode:	flags.
3295  *
3296  * Returns:
3297  *	None, request status indicated in cmd->Status.
3298  *
3299  * Context:
3300  *	Kernel context.
3301  */
3302 static void
3303 ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3304 {
3305 	EXT_LOOPBACK_REQ	plbreq;
3306 	EXT_LOOPBACK_RSP	plbrsp;
3307 	ql_mbx_data_t		mr;
3308 	uint32_t		rval;
3309 	caddr_t			bp;
3310 
3311 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3312 
3313 	/* Get loop back request. */
3314 	if (ddi_copyin((const void *)(uintptr_t)cmd->RequestAdr,
3315 	    (void *)&plbreq,
3316 	    sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
3317 		EL(ha, "failed, ddi_copyin\n");
3318 		cmd->Status = EXT_STATUS_COPY_ERR;
3319 		cmd->ResponseLen = 0;
3320 		return;
3321 	}
3322 
3323 	/* Check transfer length fits in buffer. */
3324 	if (plbreq.BufferLength < plbreq.TransferCount &&
3325 	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
3326 		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
3327 		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
3328 		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
3329 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3330 		cmd->ResponseLen = 0;
3331 		return;
3332 	}
3333 
3334 	/* Allocate command memory. */
3335 	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);
3336 	if (bp == NULL) {
3337 		EL(ha, "failed, kmem_zalloc\n");
3338 		cmd->Status = EXT_STATUS_NO_MEMORY;
3339 		cmd->ResponseLen = 0;
3340 		return;
3341 	}
3342 
3343 	/* Get loopback data. */
3344 	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
3345 	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
3346 		EL(ha, "failed, ddi_copyin-2\n");
3347 		kmem_free(bp, plbreq.TransferCount);
3348 		cmd->Status = EXT_STATUS_COPY_ERR;
3349 		cmd->ResponseLen = 0;
3350 		return;
3351 	}
3352 
3353 	if (DRIVER_SUSPENDED(ha) || ql_stall_driver(ha, 0) != QL_SUCCESS) {
3354 		EL(ha, "failed, LOOP_NOT_READY\n");
3355 		kmem_free(bp, plbreq.TransferCount);
3356 		cmd->Status = EXT_STATUS_BUSY;
3357 		cmd->ResponseLen = 0;
3358 		return;
3359 	}
3360 
3361 	/* Shutdown IP. */
3362 	if (ha->flags & IP_INITIALIZED) {
3363 		(void) ql_shutdown_ip(ha);
3364 	}
3365 
3366 	/* determine topology so we can send the loopback or the echo */
3367 	/* Echo is supported on 2300's only and above */
3368 
3369 	if ((ha->topology & QL_F_PORT) && ha->device_id >= 0x2300) {
3370 		QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using echo\n",
3371 		    ha->instance);
3372 		plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3373 		rval = ql_diag_echo(ha, bp, plbreq.TransferCount, 0, &mr);
3374 	} else {
3375 		plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3376 		rval = ql_diag_loopback(ha, bp, plbreq.TransferCount,
3377 		    plbreq.Options, plbreq.IterationCount, &mr);
3378 	}
3379 
3380 	ql_restart_driver(ha);
3381 
3382 	/* Restart IP if it was shutdown. */
3383 	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
3384 		(void) ql_initialize_ip(ha);
3385 		ql_isp_rcvbuf(ha);
3386 	}
3387 
3388 	if (rval != QL_SUCCESS) {
3389 		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
3390 		kmem_free(bp, plbreq.TransferCount);
3391 		cmd->Status = EXT_STATUS_MAILBOX;
3392 		cmd->DetailStatus = rval;
3393 		cmd->ResponseLen = 0;
3394 		return;
3395 	}
3396 
3397 	/* Return loopback data. */
3398 	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
3399 	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
3400 		EL(ha, "failed, ddi_copyout\n");
3401 		kmem_free(bp, plbreq.TransferCount);
3402 		cmd->Status = EXT_STATUS_COPY_ERR;
3403 		cmd->ResponseLen = 0;
3404 		return;
3405 	}
3406 	kmem_free(bp, plbreq.TransferCount);
3407 
3408 	/* Return loopback results. */
3409 	plbrsp.BufferAddress = plbreq.BufferAddress;
3410 	plbrsp.BufferLength = plbreq.TransferCount;
3411 	plbrsp.CompletionStatus = mr.mb[0];
3412 
3413 	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
3414 		plbrsp.CrcErrorCount = 0;
3415 		plbrsp.DisparityErrorCount = 0;
3416 		plbrsp.FrameLengthErrorCount = 0;
3417 		plbrsp.IterationCountLastError = 0;
3418 	} else {
3419 		plbrsp.CrcErrorCount = mr.mb[1];
3420 		plbrsp.DisparityErrorCount = mr.mb[2];
3421 		plbrsp.FrameLengthErrorCount = mr.mb[3];
3422 		plbrsp.IterationCountLastError = (mr.mb[19] >> 16) | mr.mb[18];
3423 	}
3424 
3425 	rval = ddi_copyout((void *)(uintptr_t)&plbrsp,
3426 	    (void *)(uintptr_t)cmd->ResponseAdr,
3427 	    sizeof (EXT_LOOPBACK_RSP), mode);
3428 	if (rval != 0) {
3429 		EL(ha, "failed, ddi_copyout-2\n");
3430 		cmd->Status = EXT_STATUS_COPY_ERR;
3431 		cmd->ResponseLen = 0;
3432 		return;
3433 	}
3434 	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);
3435 
3436 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3437 }
3438 
3439 /*
3440  * ql_send_els_rnid
3441  *	IOCTL for extended link service RNID command.
3442  *
3443  * Input:
3444  *	ha:	adapter state pointer.
3445  *	cmd:	User space CT arguments pointer.
3446  *	mode:	flags.
3447  *
3448  * Returns:
3449  *	None, request status indicated in cmd->Status.
3450  *
3451  * Context:
3452  *	Kernel context.
3453  */
3454 static void
3455 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3456 {
3457 	EXT_RNID_REQ	tmp_rnid;
3458 	port_id_t	tmp_fcid;
3459 	caddr_t		tmp_buf, bptr;
3460 	uint32_t	copy_len;
3461 	ql_tgt_t	*tq;
3462 	EXT_RNID_DATA	rnid_data;
3463 	uint32_t	loop_ready_wait = 10 * 60 * 10;
3464 	int		rval = 0;
3465 	uint32_t	local_hba = 0;
3466 
3467 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3468 
3469 	if (DRIVER_SUSPENDED(ha)) {
3470 		EL(ha, "failed, LOOP_NOT_READY\n");
3471 		cmd->Status = EXT_STATUS_BUSY;
3472 		cmd->ResponseLen = 0;
3473 		return;
3474 	}
3475 
3476 	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
3477 		/* parameter error */
3478 		EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
3479 		    cmd->RequestLen);
3480 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3481 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
3482 		cmd->ResponseLen = 0;
3483 		return;
3484 	}
3485 
3486 	if (ddi_copyin((void*)(uintptr_t)(cmd->RequestAdr),
3487 	    &tmp_rnid, cmd->RequestLen,
3488 	    mode) != 0) {
3489 		EL(ha, "failed, ddi_copyin\n");
3490 		cmd->Status = EXT_STATUS_COPY_ERR;
3491 		cmd->ResponseLen = 0;
3492 		return;
3493 	}
3494 
3495 	/* Find loop ID of the device */
3496 	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
3497 		bptr = CFG_IST(ha, CFG_CTRL_2425) ?
3498 		    (caddr_t)&ha->init_ctrl_blk.cb24.node_name :
3499 		    (caddr_t)&ha->init_ctrl_blk.cb.node_name;
3500 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
3501 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3502 			local_hba = 1;
3503 		} else {
3504 			tq = ql_find_port(ha,
3505 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
3506 		}
3507 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
3508 		bptr = CFG_IST(ha, CFG_CTRL_2425) ?
3509 		    (caddr_t)&ha->init_ctrl_blk.cb24.port_name :
3510 		    (caddr_t)&ha->init_ctrl_blk.cb.port_name;
3511 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
3512 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3513 			local_hba = 1;
3514 		} else {
3515 			tq = ql_find_port(ha,
3516 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
3517 		}
3518 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
3519 		/*
3520 		 * Copy caller's d_id to tmp space.
3521 		 */
3522 		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
3523 		    EXT_DEF_PORTID_SIZE_ACTUAL);
3524 		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
3525 
3526 		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
3527 		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
3528 			local_hba = 1;
3529 		} else {
3530 			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
3531 			    QLNT_PID);
3532 		}
3533 	}
3534 
3535 	/* Allocate memory for command. */
3536 	tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP);
3537 	if (tmp_buf == NULL) {
3538 		EL(ha, "failed, kmem_zalloc\n");
3539 		cmd->Status = EXT_STATUS_NO_MEMORY;
3540 		cmd->ResponseLen = 0;
3541 		return;
3542 	}
3543 
3544 	if (local_hba) {
3545 		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
3546 		if (rval != QL_SUCCESS) {
3547 			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
3548 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3549 			cmd->Status = EXT_STATUS_ERR;
3550 			cmd->ResponseLen = 0;
3551 			return;
3552 		}
3553 
3554 		/* Save gotten RNID data. */
3555 		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));
3556 
3557 		/* Now build the Send RNID response */
3558 		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
3559 		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
3560 		tmp_buf[2] = 0;
3561 		tmp_buf[3] = sizeof (EXT_RNID_DATA);
3562 
3563 		if (CFG_IST(ha, CFG_CTRL_2425)) {
3564 			bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4],
3565 			    EXT_DEF_WWN_NAME_SIZE);
3566 			bcopy(ha->init_ctrl_blk.cb24.node_name,
3567 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3568 			    EXT_DEF_WWN_NAME_SIZE);
3569 		} else {
3570 			bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4],
3571 			    EXT_DEF_WWN_NAME_SIZE);
3572 			bcopy(ha->init_ctrl_blk.cb.node_name,
3573 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3574 			    EXT_DEF_WWN_NAME_SIZE);
3575 		}
3576 
3577 		bcopy((uint8_t *)&rnid_data,
3578 		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
3579 		    sizeof (EXT_RNID_DATA));
3580 	} else {
3581 		if (tq == NULL) {
3582 			/* no matching device */
3583 			EL(ha, "failed, device not found\n");
3584 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3585 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
3586 			cmd->DetailStatus = EXT_DSTATUS_TARGET;
3587 			cmd->ResponseLen = 0;
3588 			return;
3589 		}
3590 
3591 		/* Send command */
3592 		rval = ql_send_rnid_els(ha, tq->loop_id,
3593 		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
3594 		if (rval != QL_SUCCESS) {
3595 			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3596 			    rval, tq->loop_id);
3597 			while (LOOP_NOT_READY(ha)) {
3598 				ql_delay(ha, 100000);
3599 				if (loop_ready_wait-- == 0) {
3600 					EL(ha, "failed, loop not ready\n");
3601 					cmd->Status = EXT_STATUS_ERR;
3602 					cmd->ResponseLen = 0;
3603 				}
3604 			}
3605 			rval = ql_send_rnid_els(ha, tq->loop_id,
3606 			    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE,
3607 			    tmp_buf);
3608 			if (rval != QL_SUCCESS) {
3609 				/* error */
3610 				EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3611 				    rval, tq->loop_id);
3612 				kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3613 				cmd->Status = EXT_STATUS_ERR;
3614 				cmd->ResponseLen = 0;
3615 				return;
3616 			}
3617 		}
3618 	}
3619 
3620 	/* Copy the response */
3621 	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
3622 	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;
3623 
3624 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
3625 	    copy_len, mode) != copy_len) {
3626 		cmd->Status = EXT_STATUS_COPY_ERR;
3627 		EL(ha, "failed, ddi_copyout\n");
3628 	} else {
3629 		cmd->ResponseLen = copy_len;
3630 		if (copy_len < SEND_RNID_RSP_SIZE) {
3631 			cmd->Status = EXT_STATUS_DATA_OVERRUN;
3632 			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");
3633 
3634 		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
3635 			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
3636 			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
3637 		} else {
3638 			cmd->Status = EXT_STATUS_OK;
3639 			QL_PRINT_9(CE_CONT, "(%d): exiting\n",
3640 			    ha->instance);
3641 		}
3642 	}
3643 
3644 	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3645 }
3646 
3647 /*
3648  * ql_set_host_data
3649  *	Process IOCTL subcommand to set host/adapter related data.
3650  *
3651  * Input:
3652  *	ha:	adapter state pointer.
3653  *	cmd:	User space CT arguments pointer.
3654  *	mode:	flags.
3655  *
3656  * Returns:
3657  *	None, request status indicated in cmd->Status.
3658  *
3659  * Context:
3660  *	Kernel context.
3661  */
3662 static void
3663 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3664 {
3665 	QL_PRINT_9(CE_CONT, "(%d): entered, SubCode=%d\n", ha->instance,
3666 	    cmd->SubCode);
3667 
3668 	/*
3669 	 * case off on command subcode
3670 	 */
3671 	switch (cmd->SubCode) {
3672 	case EXT_SC_SET_RNID:
3673 		ql_set_rnid_parameters(ha, cmd, mode);
3674 		break;
3675 	case EXT_SC_RST_STATISTICS:
3676 		(void) ql_reset_statistics(ha, cmd);
3677 		break;
3678 	case EXT_SC_SET_BEACON_STATE:
3679 		ql_set_led_state(ha, cmd, mode);
3680 		break;
3681 	case EXT_SC_SET_PARMS:
3682 	case EXT_SC_SET_BUS_MODE:
3683 	case EXT_SC_SET_DR_DUMP_BUF:
3684 	case EXT_SC_SET_RISC_CODE:
3685 	case EXT_SC_SET_FLASH_RAM:
3686 	case EXT_SC_SET_LUN_BITMASK:
3687 	case EXT_SC_SET_RETRY_CNT:
3688 	case EXT_SC_SET_RTIN:
3689 	case EXT_SC_SET_FC_LUN_BITMASK:
3690 	case EXT_SC_ADD_TARGET_DEVICE:
3691 	case EXT_SC_SWAP_TARGET_DEVICE:
3692 	case EXT_SC_SET_SEL_TIMEOUT:
3693 	default:
3694 		/* function not supported. */
3695 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3696 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3697 		break;
3698 	}
3699 
3700 	if (cmd->Status != EXT_STATUS_OK) {
3701 		EL(ha, "failed, Status=%d\n", cmd->Status);
3702 	} else {
3703 		/*EMPTY*/
3704 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3705 	}
3706 }
3707 
3708 /*
3709  * ql_get_host_data
3710  *	Performs EXT_CC_GET_DATA subcommands.
3711  *
3712  * Input:
3713  *	ha:	adapter state pointer.
3714  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3715  *	mode:	flags.
3716  *
3717  * Returns:
3718  *	None, request status indicated in cmd->Status.
3719  *
3720  * Context:
3721  *	Kernel context.
3722  */
3723 static void
3724 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3725 {
3726 	int	out_size = 0;
3727 
3728 	QL_PRINT_9(CE_CONT, "(%d): entered, SubCode=%d\n", ha->instance,
3729 	    cmd->SubCode);
3730 
3731 	/* case off on command subcode */
3732 	switch (cmd->SubCode) {
3733 	case EXT_SC_GET_STATISTICS:
3734 		out_size = sizeof (EXT_HBA_PORT_STAT);
3735 		break;
3736 	case EXT_SC_GET_FC_STATISTICS:
3737 		out_size = sizeof (EXT_HBA_PORT_STAT);
3738 		break;
3739 	case EXT_SC_GET_PORT_SUMMARY:
3740 		out_size = sizeof (EXT_DEVICEDATA);
3741 		break;
3742 	case EXT_SC_GET_RNID:
3743 		out_size = sizeof (EXT_RNID_DATA);
3744 		break;
3745 	case EXT_SC_GET_TARGET_ID:
3746 		out_size = sizeof (EXT_DEST_ADDR);
3747 		break;
3748 	case EXT_SC_GET_BEACON_STATE:
3749 		out_size = sizeof (EXT_BEACON_CONTROL);
3750 		break;
3751 	case EXT_SC_GET_FC4_STATISTICS:
3752 		out_size = sizeof (EXT_HBA_FC4STATISTICS);
3753 		break;
3754 	case EXT_SC_GET_SCSI_ADDR:
3755 	case EXT_SC_GET_ERR_DETECTIONS:
3756 	case EXT_SC_GET_BUS_MODE:
3757 	case EXT_SC_GET_DR_DUMP_BUF:
3758 	case EXT_SC_GET_RISC_CODE:
3759 	case EXT_SC_GET_FLASH_RAM:
3760 	case EXT_SC_GET_LINK_STATUS:
3761 	case EXT_SC_GET_LOOP_ID:
3762 	case EXT_SC_GET_LUN_BITMASK:
3763 	case EXT_SC_GET_PORT_DATABASE:
3764 	case EXT_SC_GET_PORT_DATABASE_MEM:
3765 	case EXT_SC_GET_POSITION_MAP:
3766 	case EXT_SC_GET_RETRY_CNT:
3767 	case EXT_SC_GET_RTIN:
3768 	case EXT_SC_GET_FC_LUN_BITMASK:
3769 	case EXT_SC_GET_SEL_TIMEOUT:
3770 	default:
3771 		/* function not supported. */
3772 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3773 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3774 		cmd->ResponseLen = 0;
3775 		return;
3776 	}
3777 
3778 	if (cmd->ResponseLen < out_size) {
3779 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3780 		cmd->DetailStatus = out_size;
3781 		EL(ha, "failed, ResponseLen=%xh, size=%xh\n",
3782 		    cmd->ResponseLen, out_size);
3783 		cmd->ResponseLen = 0;
3784 		return;
3785 	}
3786 
3787 	switch (cmd->SubCode) {
3788 	case EXT_SC_GET_RNID:
3789 		ql_get_rnid_parameters(ha, cmd, mode);
3790 		break;
3791 	case EXT_SC_GET_STATISTICS:
3792 		ql_get_statistics(ha, cmd, mode);
3793 		break;
3794 	case EXT_SC_GET_FC_STATISTICS:
3795 		ql_get_statistics_fc(ha, cmd, mode);
3796 		break;
3797 	case EXT_SC_GET_FC4_STATISTICS:
3798 		ql_get_statistics_fc4(ha, cmd, mode);
3799 		break;
3800 	case EXT_SC_GET_PORT_SUMMARY:
3801 		ql_get_port_summary(ha, cmd, mode);
3802 		break;
3803 	case EXT_SC_GET_TARGET_ID:
3804 		ql_get_target_id(ha, cmd, mode);
3805 		break;
3806 	case EXT_SC_GET_BEACON_STATE:
3807 		ql_get_led_state(ha, cmd, mode);
3808 		break;
3809 	}
3810 
3811 	if (cmd->Status != EXT_STATUS_OK) {
3812 		EL(ha, "failed, Status=%d\n", cmd->Status);
3813 	} else {
3814 		/*EMPTY*/
3815 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3816 	}
3817 }
3818 
3819 /* ******************************************************************** */
3820 /*			Helper Functions				*/
3821 /* ******************************************************************** */
3822 
3823 /*
3824  * ql_lun_count
3825  *	Get numbers of LUNS on target.
3826  *
3827  * Input:
3828  *	ha:	adapter state pointer.
3829  *	q:	device queue pointer.
3830  *
3831  * Returns:
3832  *	Number of LUNs.
3833  *
3834  * Context:
3835  *	Kernel context.
3836  */
3837 static int
3838 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
3839 {
3840 	int	cnt;
3841 
3842 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
3843 
3844 	/* Bypass LUNs that failed. */
3845 	cnt = ql_report_lun(ha, tq);
3846 	if (cnt == 0) {
3847 		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
3848 	}
3849 
3850 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
3851 
3852 	return (cnt);
3853 }
3854 
3855 /*
3856  * ql_report_lun
3857  *	Get numbers of LUNS using report LUN command.
3858  *
3859  * Input:
3860  *	ha:	adapter state pointer.
3861  *	q:	target queue pointer.
3862  *
3863  * Returns:
3864  *	Number of LUNs.
3865  *
3866  * Context:
3867  *	Kernel context.
3868  */
static int
ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int			rval;
	uint8_t			retries;
	ql_mbx_iocb_t		*pkt;
	ql_rpt_lun_lst_t	*rpt;
	dma_mem_t		dma_mem;
	uint32_t		pkt_size, cnt;
	uint16_t		comp_status;
	uint8_t			scsi_status_h, scsi_status_l, *reqs;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		return (0);
	}

	/* One allocation holds the IOCB followed by the LUN list buffer. */
	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (0);
	}
	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
	    LITTLE_ENDIAN_DMA, MEM_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		kmem_free(pkt, pkt_size);
		return (0);
	}

	/* Issue the REPORT LUNS command, retrying up to 4 times. */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* 24xx/25xx: build a command type 7 IOCB. */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB */
			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
			/* CDB bytes 6-9: allocation length, big-endian. */
			pkt->cmd24.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			/* Firmware expects the CDB byte-swapped per word. */
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.total_byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd24.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* Legacy ISP with 64-bit DMA: command type 3 IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			pkt->cmd3.entry_count = 1;
			/* Extended f/w carries the loop id in two bytes. */
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
			/* CDB bytes 6-9: allocation length, big-endian. */
			pkt->cmd3.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else {
			/* Legacy ISP, 32-bit DMA: command type 2 IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			pkt->cmd.entry_count = 1;
			/* Extended f/w carries the loop id in two bytes. */
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
			/* CDB bytes 6-9: allocation length, big-endian. */
			pkt->cmd.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		}

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Extract completion/SCSI status from the chip's reply. */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* Mask to the meaningful entry-status bits. */
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows any FCP response data. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			/* Mask to the meaningful entry-status bits. */
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
		}

		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
		    scsi_status_l & STATUS_CHECK) {
			/* Device underrun, treat as OK. */
			if (rval == QL_SUCCESS &&
			    comp_status == CS_DATA_UNDERRUN &&
			    scsi_status_h & FCP_RESID_UNDER) {
				break;
			}

			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
			    comp_status, scsi_status_h, scsi_status_l);

			/* Classify the failure; some are not worth retrying. */
			if (rval == QL_SUCCESS) {
				if ((comp_status == CS_TIMEOUT) ||
				    (comp_status == CS_PORT_UNAVAILABLE) ||
				    (comp_status == CS_PORT_LOGGED_OUT)) {
					rval = QL_FUNCTION_TIMEOUT;
					break;
				}
				rval = QL_FUNCTION_FAILED;
			} else if (rval == QL_ABORTED) {
				break;
			}

			if (scsi_status_l & STATUS_CHECK) {
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			/* Success: leave the retry loop. */
			break;
		}
		/* Reset the IOCB/response area before retrying. */
		bzero((caddr_t)pkt, pkt_size);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
		rval = 0;
	} else {
		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
		/* REPORT LUNS list length is in bytes; 8 bytes per LUN. */
		rval = (int)(BE_32(rpt->hdr.len) / 8);
	}

	kmem_free(pkt, pkt_size);
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (rval);
}
4103 
4104 /*
4105  * ql_inq_scan
 *	Get the number of LUNs using the inquiry command.
4107  *
4108  * Input:
4109  *	ha:		adapter state pointer.
4110  *	tq:		target queue pointer.
4111  *	count:		scan for the number of existing LUNs.
4112  *
4113  * Returns:
4114  *	Number of LUNs.
4115  *
4116  * Context:
4117  *	Kernel context.
4118  */
4119 static int
4120 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4121 {
4122 	int		lun, cnt, rval;
4123 	ql_mbx_iocb_t	*pkt;
4124 	uint8_t		*inq;
4125 	uint32_t	pkt_size;
4126 
4127 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
4128 
4129 	pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4130 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4131 	if (pkt == NULL) {
4132 		EL(ha, "failed, kmem_zalloc\n");
4133 		return (0);
4134 	}
4135 	inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4136 
4137 	cnt = 0;
4138 	for (lun = 0; lun < MAX_LUNS; lun++) {
4139 
4140 		if (DRIVER_SUSPENDED(ha)) {
4141 			rval = QL_LOOP_DOWN;
4142 			cnt = 0;
4143 			break;
4144 		}
4145 
4146 		rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4147 		if (rval == QL_SUCCESS) {
4148 			switch (*inq) {
4149 			case DTYPE_DIRECT:
4150 			case DTYPE_PROCESSOR:	/* Appliance. */
4151 			case DTYPE_WORM:
4152 			case DTYPE_RODIRECT:
4153 			case DTYPE_SCANNER:
4154 			case DTYPE_OPTICAL:
4155 			case DTYPE_CHANGER:
4156 			case DTYPE_ESI:
4157 				cnt++;
4158 				break;
4159 			case DTYPE_SEQUENTIAL:
4160 				cnt++;
4161 				tq->flags |= TQF_TAPE_DEVICE;
4162 				break;
4163 			default:
4164 				QL_PRINT_9(CE_CONT, "(%d): failed, "
4165 				    "unsupported device id=%xh, lun=%d, "
4166 				    "type=%xh\n", ha->instance, tq->loop_id,
4167 				    lun, *inq);
4168 				break;
4169 			}
4170 
4171 			if (*inq == DTYPE_ESI || cnt >= count) {
4172 				break;
4173 			}
4174 		} else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4175 			cnt = 0;
4176 			break;
4177 		}
4178 	}
4179 
4180 	kmem_free(pkt, pkt_size);
4181 
4182 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
4183 
4184 	return (cnt);
4185 }
4186 
4187 /*
4188  * ql_inq
4189  *	Issue inquiry command.
4190  *
4191  * Input:
4192  *	ha:		adapter state pointer.
4193  *	tq:		target queue pointer.
4194  *	lun:		LUN number.
4195  *	pkt:		command and buffer pointer.
4196  *	inq_len:	amount of inquiry data.
4197  *
4198  * Returns:
4199  *	ql local function return status code.
4200  *
4201  * Context:
4202  *	Kernel context.
4203  */
static int
ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
    uint8_t inq_len)
{
	dma_mem_t	dma_mem;
	int		rval, retries;
	uint32_t	pkt_size, cnt;
	uint16_t	comp_status;
	uint8_t		scsi_status_h, scsi_status_l, *reqs;
	caddr_t		inq_data;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Don't issue commands while the link is down/suspended. */
	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, loop down\n");
		return (QL_FUNCTION_TIMEOUT);
	}

	/* The caller's buffer holds the IOCB followed by inq_len data. */
	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
	bzero((caddr_t)pkt, pkt_size);

	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
	    LITTLE_ENDIAN_DMA, MEM_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		/*
		 * NOTE(review): returns 0 here rather than a QL_* status
		 * code like the other exit paths; callers compare against
		 * QL_* constants — confirm this is intentional.
		 */
		return (0);
	}

	/* Issue the inquiry, retrying up to 4 times on failure. */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* 24xx/25xx: build a type-7 command IOCB. */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set LUN number */
			pkt->cmd24.fcp_lun[2] = LSB(lun);
			pkt->cmd24.fcp_lun[3] = MSB(lun);

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB */
			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd24.scsi_cdb[4] = inq_len;
			/* 24xx firmware expects the CDB byte-swapped per word. */
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			pkt->cmd24.total_byte_count = LE_32(inq_len);

			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_length = LE_32(inq_len);
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* 64-bit addressing: build a type-3 command IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			cnt = CMD_TYPE_3_DATA_SEGMENTS;

			pkt->cmd3.entry_count = 1;
			/* Extended firmware uses a 16-bit loop id. */
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.lun_l = LSB(lun);
			pkt->cmd3.lun_h = MSB(lun);
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd3.scsi_cdb[4] = inq_len;
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.byte_count = LE_32(inq_len);
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length = LE_32(inq_len);
		} else {
			/* 32-bit addressing: build a type-2 command IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			cnt = CMD_TYPE_2_DATA_SEGMENTS;

			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.lun_l = LSB(lun);
			pkt->cmd.lun_h = MSB(lun);
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd.scsi_cdb[4] = inq_len;
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.byte_count = LE_32(inq_len);
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length = LE_32(inq_len);
		}

/*		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming IOCB DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Extract completion status from the family-specific layout. */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* Mask to the error bits of entry_status. */
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows any FCP response data. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
		}

		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
		    scsi_status_l & STATUS_CHECK) {
			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
			    comp_status, scsi_status_h, scsi_status_l);

			if (rval == QL_SUCCESS) {
				/* These completion codes are not retried. */
				if ((comp_status == CS_TIMEOUT) ||
				    (comp_status == CS_PORT_UNAVAILABLE) ||
				    (comp_status == CS_PORT_LOGGED_OUT)) {
					rval = QL_FUNCTION_TIMEOUT;
					break;
				}
				rval = QL_FUNCTION_FAILED;
			}

			if (scsi_status_l & STATUS_CHECK) {
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			/* Command completed cleanly; stop retrying. */
			break;
		}
	}
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (rval);
}
4399 
4400 /*
4401  * ql_get_buffer_data
 *	Copies data from user space to kernel buffer.
4403  *
4404  * Input:
4405  *	src:	User source buffer address.
 *	dst:	Kernel destination buffer address.
4407  *	size:	Amount of data.
4408  *	mode:	flags.
4409  *
4410  * Returns:
4411  *	Returns number of bytes transferred.
4412  *
4413  * Context:
4414  *	Kernel context.
4415  */
4416 static uint32_t
4417 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4418 {
4419 	uint32_t	cnt;
4420 
4421 	for (cnt = 0; cnt < size; cnt++) {
4422 		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4423 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4424 			break;
4425 		}
4426 	}
4427 
4428 	return (cnt);
4429 }
4430 
4431 /*
4432  * ql_send_buffer_data
 *	Copies data from kernel buffer to user space.
4434  *
4435  * Input:
 *	src:	Kernel source buffer address.
4437  *	dst:	User destination buffer address.
4438  *	size:	Amount of data.
4439  *	mode:	flags.
4440  *
4441  * Returns:
4442  *	Returns number of bytes transferred.
4443  *
4444  * Context:
4445  *	Kernel context.
4446  */
4447 static uint32_t
4448 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4449 {
4450 	uint32_t	cnt;
4451 
4452 	for (cnt = 0; cnt < size; cnt++) {
4453 		if (ddi_copyout(src++, dst++, 1, mode) != 0) {
4454 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4455 			break;
4456 		}
4457 	}
4458 
4459 	return (cnt);
4460 }
4461 
4462 /*
4463  * ql_find_port
4464  *	Locates device queue.
4465  *
4466  * Input:
4467  *	ha:	adapter state pointer.
4468  *	name:	device port name.
4469  *
4470  * Returns:
4471  *	Returns target queue pointer.
4472  *
4473  * Context:
4474  *	Kernel context.
4475  */
4476 static ql_tgt_t *
4477 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type)
4478 {
4479 	ql_link_t	*link;
4480 	ql_tgt_t	*tq;
4481 	uint16_t	index;
4482 
4483 	/* Scan port list for requested target */
4484 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4485 		for (link = ha->dev[index].first; link != NULL;
4486 		    link = link->next) {
4487 			tq = link->base_address;
4488 
4489 			switch (type) {
4490 			case QLNT_LOOP_ID:
4491 				if (bcmp(name, &tq->loop_id,
4492 				    sizeof (uint16_t)) == 0) {
4493 					return (tq);
4494 				}
4495 				break;
4496 			case QLNT_PORT:
4497 				if (bcmp(name, tq->port_name, 8) == 0) {
4498 					return (tq);
4499 				}
4500 				break;
4501 			case QLNT_NODE:
4502 				if (bcmp(name, tq->node_name, 8) == 0) {
4503 					return (tq);
4504 				}
4505 				break;
4506 			case QLNT_PID:
4507 				if (bcmp(name, tq->d_id.r.d_id,
4508 				    sizeof (tq->d_id.r.d_id)) == 0) {
4509 					return (tq);
4510 				}
4511 				break;
4512 			default:
4513 				EL(ha, "failed, invalid type=%d\n",  type);
4514 				return (NULL);
4515 			}
4516 		}
4517 	}
4518 
4519 	return (NULL);
4520 }
4521 
4522 /*
4523  * ql_24xx_flash_desc
4524  *	Get flash descriptor table.
4525  *
4526  * Input:
4527  *	ha:		adapter state pointer.
4528  *
4529  * Returns:
4530  *	ql local function return status code.
4531  *
4532  * Context:
4533  *	Kernel context.
4534  */
static int
ql_24xx_flash_desc(ql_adapter_state_t *ha)
{
	uint8_t		w8;
	uint32_t	cnt;
	uint16_t	chksum, *bp, data;
	int		rval;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Read the raw descriptor table from flash into xp->fdesc. */
	rval = ql_dump_fcode(ha, (uint8_t *)&xp->fdesc,
	    sizeof (flash_desc_t), FLASH_2500_DESCRIPTOR_TABLE << 2);
	if (rval != QL_SUCCESS) {
		EL(ha, "read status=%xh\n", rval);
		bzero(&xp->fdesc, sizeof (flash_desc_t));
		return (rval);
	}

	QL_DUMP_9(&xp->fdesc, 8, sizeof (flash_desc_t));

	/*
	 * Sum the table as 16-bit little-endian words; a valid table's
	 * words sum to zero (checked at the bottom of this function).
	 */
	chksum = 0;
	data = 0;
	bp = (uint16_t *)&xp->fdesc;
	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
		data = *bp;
		LITTLE_ENDIAN_16(&data);
		chksum += data;
		bp++;
	}

	/* Convert each multi-byte field from flash byte order in place. */
	LITTLE_ENDIAN_32(&xp->fdesc.flash_valid);
	LITTLE_ENDIAN_16(&xp->fdesc.flash_version);
	LITTLE_ENDIAN_16(&xp->fdesc.flash_len);
	LITTLE_ENDIAN_16(&xp->fdesc.flash_checksum);
	LITTLE_ENDIAN_16(&xp->fdesc.flash_manuf);
	LITTLE_ENDIAN_16(&xp->fdesc.flash_id);
	LITTLE_ENDIAN_32(&xp->fdesc.block_size);
	LITTLE_ENDIAN_32(&xp->fdesc.alt_block_size);
	LITTLE_ENDIAN_32(&xp->fdesc.flash_size);
	LITTLE_ENDIAN_32(&xp->fdesc.write_enable_data);
	LITTLE_ENDIAN_32(&xp->fdesc.read_timeout);

	/* flash size in desc table is in 1024 bytes */
	xp->fdesc.flash_size = xp->fdesc.flash_size * 0x400;

	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_valid=%xh\n", ha->instance,
	    xp->fdesc.flash_valid);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_version=%xh\n", ha->instance,
	    xp->fdesc.flash_version);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_len=%xh\n", ha->instance,
	    xp->fdesc.flash_len);

	/* Temporarily NUL-terminate the model string for printing. */
	w8 = xp->fdesc.flash_model[17];
	xp->fdesc.flash_model[17] = 0;
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_model=%s\n", ha->instance,
	    xp->fdesc.flash_model);
	xp->fdesc.flash_model[17] = w8;

	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_checksum=%xh\n",
	    ha->instance, xp->fdesc.flash_checksum);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_unused=%xh\n", ha->instance,
	    xp->fdesc.flash_unused);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_manuf=%xh\n", ha->instance,
	    xp->fdesc.flash_manuf);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_id=%xh\n", ha->instance,
	    xp->fdesc.flash_id);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_flag=%xh\n", ha->instance,
	    xp->fdesc.flash_flag);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.erase_cmd=%xh\n", ha->instance,
	    xp->fdesc.erase_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.alt_erase_cmd=%xh\n", ha->instance,
	    xp->fdesc.alt_erase_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.write_enable_cmd=%xh\n",
	    ha->instance, xp->fdesc.write_enable_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.write_enable_bits=%xh\n",
	    ha->instance, xp->fdesc.write_enable_bits);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.write_statusreg_cmd=%xh\n",
	    ha->instance, xp->fdesc.write_statusreg_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.unprotect_sector_cmd=%xh\n",
	    ha->instance, xp->fdesc.unprotect_sector_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.read_manuf_cmd=%xh\n",
	    ha->instance, xp->fdesc.read_manuf_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.block_size=%xh\n", ha->instance,
	    xp->fdesc.block_size);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.alt_block_size=%xh\n",
	    ha->instance, xp->fdesc.alt_block_size);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.flash_size=%xh\n", ha->instance,
	    xp->fdesc.flash_size);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.write_enable_data=%xh\n",
	    ha->instance, xp->fdesc.write_enable_data);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.readid_address_len=%xh\n",
	    ha->instance, xp->fdesc.readid_address_len);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.write_disable_bits=%xh\n",
	    ha->instance, xp->fdesc.write_disable_bits);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.read_device_id_len=%xh\n",
	    ha->instance, xp->fdesc.read_device_id_len);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.chip_erase_cmd=%xh\n",
	    ha->instance, xp->fdesc.chip_erase_cmd);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.read_timeout=%xh\n", ha->instance,
	    xp->fdesc.read_timeout);
	QL_PRINT_9(CE_CONT, "(%d): xp->fdesc.protect_sector_cmd=%xh\n",
	    ha->instance, xp->fdesc.protect_sector_cmd);

	/* Reject the table if the checksum, magic, or version is wrong. */
	if (chksum != 0 || xp->fdesc.flash_valid != FLASH_DESC_VAILD ||
	    xp->fdesc.flash_version != FLASH_DESC_VERSION) {
		EL(ha, "invalid descriptor table\n");
		bzero(&xp->fdesc, sizeof (flash_desc_t));
		return (QL_FUNCTION_FAILED);
	}

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (QL_SUCCESS);
}
4650 /*
4651  * ql_setup_flash
4652  *	Gets the manufacturer and id number of the flash chip, and
4653  *	sets up the size parameter.
4654  *
4655  * Input:
4656  *	ha:	adapter state pointer.
4657  *
4658  * Returns:
4659  *	int:	ql local function return status code.
4660  *
4661  * Context:
4662  *	Kernel context.
4663  */
int
ql_setup_flash(ql_adapter_state_t *ha)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = QL_SUCCESS;

	/* Already probed; nothing to do. */
	if (xp->fdesc.flash_size != 0) {
		return (rval);
	}

	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
		return (QL_FUNCTION_FAILED);
	}

	if (CFG_IST(ha, CFG_CTRL_25XX)) {
		/*
		 * Temporarily set the ha->xioctl->fdesc.flash_size to
		 * 25xx flash size to avoid failing of ql_dump_fcode.
		 */
		ha->xioctl->fdesc.flash_size = 0x200000;
		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
			QL_PRINT_9(CE_CONT, "(%d): flash_desc exit\n",
			    ha->instance);
			return (rval);
		}
		/* No valid descriptor table; fall back to ID probing. */
		(void) ql_24xx_flash_id(ha);

	} else if (CFG_IST(ha, CFG_CTRL_2425)) {
		(void) ql_24xx_flash_id(ha);
	} else {
		/* Legacy parts: probe manufacturer/device ID directly. */
		ql_flash_enable(ha);

		/* Software ID entry sequence, then read manufacturer ID. */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x90);
		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);

		/* SBUS cards use different command/ID addresses. */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
			ql_write_flash_byte(ha, 0x5555, 0x55);
			ql_write_flash_byte(ha, 0xaaaa, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0002);
		} else {
			ql_write_flash_byte(ha, 0x5555, 0xaa);
			ql_write_flash_byte(ha, 0x2aaa, 0x55);
			ql_write_flash_byte(ha, 0x5555, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0001);
		}

		/* Exit software ID mode (reset command). */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);

		ql_flash_disable(ha);
	}

	/* Default flash descriptor table. */
	xp->fdesc.write_statusreg_cmd = 1;
	xp->fdesc.write_enable_bits = 0;
	xp->fdesc.unprotect_sector_cmd = 0;
	xp->fdesc.protect_sector_cmd = 0;
	xp->fdesc.write_disable_bits = 0x9c;
	xp->fdesc.block_size = 0x10000;
	xp->fdesc.erase_cmd = 0xd8;

	/* Map manufacturer + device ID to part size and quirks. */
	switch (xp->fdesc.flash_manuf) {
	case AMD_FLASH:
		switch (xp->fdesc.flash_id) {
		case SPAN_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		case AMD_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case AMD_FLASHID_512K:
		case AMD_FLASHID_512Kt:
		case AMD_FLASHID_512Kb:
			if (CFG_IST(ha, CFG_SBUS_CARD)) {
				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
			} else {
				xp->fdesc.flash_size = 0x80000;
			}
			break;
		case AMD_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ST_FLASH:
		switch (xp->fdesc.flash_id) {
		case ST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case ST_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case ST_FLASHID_M25PXX:
			/* M25Pxx family: flash_len encodes the density. */
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case SST_FLASH:
		switch (xp->fdesc.flash_id) {
		case SST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case SST_FLASHID_1024K_A:
			xp->fdesc.flash_size = 0x100000;
			/* This part uses 32K blocks and a different erase. */
			xp->fdesc.block_size = 0x8000;
			xp->fdesc.erase_cmd = 0x52;
			break;
		case SST_FLASHID_1024K:
		case SST_FLASHID_1024K_B:
			xp->fdesc.flash_size = 0x100000;
			break;
		case SST_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case MXIC_FLASH:
		switch (xp->fdesc.flash_id) {
		case MXIC_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case MXIC_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case MXIC_FLASHID_25LXX:
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ATMEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case ATMEL_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			/* Atmel parts need sector protect/unprotect cmds. */
			xp->fdesc.write_disable_bits = 0xbc;
			xp->fdesc.unprotect_sector_cmd = 0x39;
			xp->fdesc.protect_sector_cmd = 0x36;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case WINBOND_FLASH:
		switch (xp->fdesc.flash_id) {
		case WINBOND_FLASHID:
			if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x16) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x17) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case INTEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case INTEL_FLASHID:
			if (xp->fdesc.flash_len == 0x11) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x12) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x13) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	default:
		rval = QL_FUNCTION_FAILED;
		break;
	}

	/*
	 * hack for non std 2312 and 6312 boards. hardware people need to
	 * use either the 128k flash chip (original), or something larger.
	 * For driver purposes, we'll treat it as a 128k flash chip.
	 */
	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
	    ha->device_id == 0x6322) && (xp->fdesc.flash_size > 0x20000) &&
	    (CFG_IST(ha, CFG_SBUS_CARD) ==  0)) {
		EL(ha, "chip exceeds max size: %xh, using 128k\n",
		    xp->fdesc.flash_size);
		xp->fdesc.flash_size = 0x20000;
	}

	if (rval == QL_SUCCESS) {
		EL(ha, "man_id = %xh, size = %xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_size);
	} else {
		EL(ha, "unsupported mfr / type: man_id = %xh, flash_id = "
		    "%xh\n", xp->fdesc.flash_manuf, xp->fdesc.flash_id);
	}

	return (rval);
}
4900 
4901 /*
4902  * ql_flash_fcode_load
4903  *	Loads fcode data into flash from application.
4904  *
4905  * Input:
4906  *	ha:	adapter state pointer.
4907  *	bp:	user buffer address.
4908  *	size:	user buffer size.
4909  *	mode:	flags
4910  *
4911  * Returns:
4912  *
4913  * Context:
4914  *	Kernel context.
4915  */
4916 static int
4917 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
4918     int mode)
4919 {
4920 	uint8_t		*bfp;
4921 	ql_xioctl_t	*xp = ha->xioctl;
4922 	int		rval = 0;
4923 
4924 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
4925 
4926 	if (bsize > xp->fdesc.flash_size) {
4927 		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
4928 		    xp->fdesc.flash_size);
4929 		return (ENOMEM);
4930 	}
4931 
4932 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
4933 		EL(ha, "failed, kmem_zalloc\n");
4934 		rval = ENOMEM;
4935 	} else  {
4936 		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
4937 			EL(ha, "failed, ddi_copyin\n");
4938 			rval = EFAULT;
4939 		} else if (ql_load_fcode(ha, bfp, bsize) != QL_SUCCESS) {
4940 			EL(ha, "failed, load_fcode\n");
4941 			rval = EFAULT;
4942 		} else {
4943 			/* update the fcache */
4944 			ql_update_fcache(ha, bfp, bsize);
4945 			rval = 0;
4946 		}
4947 		kmem_free(bfp, bsize);
4948 	}
4949 
4950 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
4951 
4952 	return (rval);
4953 }
4954 
4955 /*
4956  * ql_load_fcode
4957  *	Loads fcode in to flash.
4958  *
4959  * Input:
4960  *	ha:	adapter state pointer.
4961  *	dp:	data pointer.
4962  *	size:	data length.
4963  *
4964  * Returns:
4965  *	ql local function return status code.
4966  *
4967  * Context:
4968  *	Kernel context.
4969  */
4970 static int
4971 ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
4972 {
4973 	uint32_t	cnt;
4974 	int		rval;
4975 
4976 	if (CFG_IST(ha, CFG_CTRL_2425)) {
4977 		return (ql_24xx_load_flash(ha, dp, size, 0));
4978 	}
4979 
4980 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
4981 
4982 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
4983 		/*
4984 		 * sbus has an additional check to make
4985 		 * sure they don't brick the HBA.
4986 		 */
4987 		if (dp[0] != 0xf1) {
4988 			EL(ha, "failed, incorrect fcode for sbus\n");
4989 			return (QL_FUNCTION_PARAMETER_ERROR);
4990 		}
4991 	}
4992 
4993 	GLOBAL_HW_LOCK();
4994 
4995 	/* Enable Flash Read/Write. */
4996 	ql_flash_enable(ha);
4997 
4998 	/* Erase flash prior to write. */
4999 	rval = ql_erase_flash(ha, 0);
5000 
5001 	if (rval == QL_SUCCESS) {
5002 		/* Write fcode data to flash. */
5003 		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
5004 			/* Allow other system activity. */
5005 			if (cnt % 0x1000 == 0) {
5006 				drv_usecwait(1);
5007 			}
5008 			rval = ql_program_flash_address(ha, cnt, *dp++);
5009 			if (rval != QL_SUCCESS)
5010 				break;
5011 		}
5012 	}
5013 
5014 	ql_flash_disable(ha);
5015 
5016 	GLOBAL_HW_UNLOCK();
5017 
5018 	if (rval != QL_SUCCESS) {
5019 		EL(ha, "failed, rval=%xh\n", rval);
5020 	} else {
5021 		/*EMPTY*/
5022 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5023 	}
5024 	return (rval);
5025 }
5026 
5027 /*
 * ql_flash_fcode_dump
5029  *	Dumps FLASH to application.
5030  *
5031  * Input:
5032  *	ha:	adapter state pointer.
5033  *	bp:	user buffer address.
5034  *	bsize:	user buffer size
5035  *	mode:	flags
5036  *
5037  * Returns:
5038  *
5039  * Context:
5040  *	Kernel context.
5041  */
5042 static int
5043 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5044     int mode)
5045 {
5046 	uint8_t		*bfp;
5047 	int		rval;
5048 	ql_xioctl_t	*xp = ha->xioctl;
5049 
5050 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5051 
5052 	/* adjust max read size to flash size */
5053 	if (bsize > xp->fdesc.flash_size) {
5054 		EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5055 		    xp->fdesc.flash_size);
5056 		bsize = xp->fdesc.flash_size;
5057 	}
5058 
5059 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5060 		EL(ha, "failed, kmem_zalloc\n");
5061 		rval = ENOMEM;
5062 	} else {
5063 		/* Dump Flash fcode. */
5064 		rval = ql_dump_fcode(ha, bfp, bsize, 0);
5065 
5066 		if (rval != QL_SUCCESS) {
5067 			EL(ha, "failed, dump_fcode = %x\n", rval);
5068 			rval = EFAULT;
5069 		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5070 			EL(ha, "failed, ddi_copyout\n");
5071 			rval = EFAULT;
5072 		} else {
5073 			rval = 0;
5074 		}
5075 		kmem_free(bfp, bsize);
5076 	}
5077 
5078 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5079 
5080 	return (rval);
5081 }
5082 
5083 /*
5084  * ql_dump_fcode
5085  *	Dumps fcode from flash.
5086  *
5087  * Input:
5088  *	ha:		adapter state pointer.
5089  *	dp:		data pointer.
5090  *	size:		data length.
5091  *	startpos:	starting position in flash.
5092  *			(start position must be 4 byte aligned)
5093  *
5094  * Returns:
5095  *	ql local function return status code.
5096  *
5097  * Context:
5098  *	Kernel context.
5099  *
5100  */
int
ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
    uint32_t startpos)
{
	uint32_t	cnt, data, addr;
	int		rval = QL_SUCCESS;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* make sure startpos+size doesn't exceed flash */
	if (size + startpos > ha->xioctl->fdesc.flash_size) {
		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
		    size, startpos, ha->xioctl->fdesc.flash_size);
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	if (CFG_IST(ha, CFG_CTRL_2425)) {

		/* check start addr is 32 bit aligned for 24xx */
		if ((startpos & 0x3) != 0) {
			EL(ha, "incorrect buffer size alignment\n");
			return (QL_FUNCTION_PARAMETER_ERROR);
		}

		/* adjust 24xx start addr for 32 bit words */
		/* Note: addr is only set and used on the 2425 path. */
		addr = startpos / 4 | FLASH_DATA_ADDR;
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_CTRL_2425) == 0) {
		ql_flash_enable(ha);
	}

	/* Read fcode data from flash. */
	cnt = startpos;
	size += startpos;
	while (cnt < size) {
		/* Allow other system activity. */
		if (cnt % 0x1000 == 0) {
			drv_usecwait(1);
		}
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* 24xx reads 32-bit words; unpack LSB first. */
			rval = ql_24xx_read_flash(ha, addr++, &data);
			if (rval != QL_SUCCESS) {
				break;
			}
			*dp++ = LSB(LSW(data));
			*dp++ = MSB(LSW(data));
			*dp++ = LSB(MSW(data));
			*dp++ = MSB(MSW(data));
			cnt += 4;
		} else {
			/* Legacy parts are read one byte at a time. */
			*dp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
			cnt++;
		}
	}

	if (CFG_IST(ha, CFG_CTRL_2425) == 0) {
		ql_flash_disable(ha);
	}

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5174 
5175 /*
5176  * ql_program_flash_address
5177  *	Program flash address.
5178  *
5179  * Input:
5180  *	ha:	adapter state pointer.
5181  *	addr:	flash byte address.
5182  *	data:	data to be written to flash.
5183  *
5184  * Returns:
5185  *	ql local function return status code.
5186  *
5187  * Context:
5188  *	Kernel context.
5189  */
5190 static int
5191 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
5192     uint8_t data)
5193 {
5194 	int	rval;
5195 
5196 	/* Write Program Command Sequence */
5197 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5198 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5199 		ql_write_flash_byte(ha, addr, data);
5200 	} else {
5201 		ql_write_flash_byte(ha, 0x5555, 0xaa);
5202 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
5203 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5204 		ql_write_flash_byte(ha, addr, data);
5205 	}
5206 
5207 	/* Wait for write to complete. */
5208 	rval = ql_poll_flash(ha, addr, data);
5209 
5210 	if (rval != QL_SUCCESS) {
5211 		EL(ha, "failed, rval=%xh\n", rval);
5212 	}
5213 	return (rval);
5214 }
5215 
5216 /*
5217  * ql_set_rnid_parameters
5218  *	Set RNID parameters.
5219  *
5220  * Input:
5221  *	ha:	adapter state pointer.
5222  *	cmd:	User space CT arguments pointer.
5223  *	mode:	flags.
5224  */
5225 static void
5226 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5227 {
5228 	EXT_SET_RNID_REQ	tmp_set;
5229 	EXT_RNID_DATA		*tmp_buf;
5230 	int			rval = 0;
5231 
5232 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5233 
5234 	if (DRIVER_SUSPENDED(ha)) {
5235 		EL(ha, "failed, LOOP_NOT_READY\n");
5236 		cmd->Status = EXT_STATUS_BUSY;
5237 		cmd->ResponseLen = 0;
5238 		return;
5239 	}
5240 
5241 	cmd->ResponseLen = 0; /* NO response to caller. */
5242 	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5243 		/* parameter error */
5244 		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5245 		    cmd->RequestLen);
5246 		cmd->Status = EXT_STATUS_INVALID_PARAM;
5247 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5248 		cmd->ResponseLen = 0;
5249 		return;
5250 	}
5251 
5252 	rval = ddi_copyin((void*)(uintptr_t)(cmd->RequestAdr), &tmp_set,
5253 	    cmd->RequestLen, mode);
5254 	if (rval != 0) {
5255 		EL(ha, "failed, ddi_copyin\n");
5256 		cmd->Status = EXT_STATUS_COPY_ERR;
5257 		cmd->ResponseLen = 0;
5258 		return;
5259 	}
5260 
5261 	/* Allocate memory for command. */
5262 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5263 	if (tmp_buf == NULL) {
5264 		EL(ha, "failed, kmem_zalloc\n");
5265 		cmd->Status = EXT_STATUS_NO_MEMORY;
5266 		cmd->ResponseLen = 0;
5267 		return;
5268 	}
5269 
5270 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5271 	    (caddr_t)tmp_buf);
5272 	if (rval != QL_SUCCESS) {
5273 		/* error */
5274 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5275 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5276 		cmd->Status = EXT_STATUS_ERR;
5277 		cmd->ResponseLen = 0;
5278 		return;
5279 	}
5280 
5281 	/* Now set the requested params. */
5282 	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5283 	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5284 	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5285 
5286 	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5287 	    (caddr_t)tmp_buf);
5288 	if (rval != QL_SUCCESS) {
5289 		/* error */
5290 		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5291 		cmd->Status = EXT_STATUS_ERR;
5292 		cmd->ResponseLen = 0;
5293 	}
5294 
5295 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5296 
5297 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5298 }
5299 
5300 /*
5301  * ql_get_rnid_parameters
5302  *	Get RNID parameters.
5303  *
5304  * Input:
5305  *	ha:	adapter state pointer.
5306  *	cmd:	User space CT arguments pointer.
5307  *	mode:	flags.
5308  */
5309 static void
5310 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5311 {
5312 	EXT_RNID_DATA	*tmp_buf;
5313 	uint32_t	rval;
5314 
5315 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5316 
5317 	if (DRIVER_SUSPENDED(ha)) {
5318 		EL(ha, "failed, LOOP_NOT_READY\n");
5319 		cmd->Status = EXT_STATUS_BUSY;
5320 		cmd->ResponseLen = 0;
5321 		return;
5322 	}
5323 
5324 	/* Allocate memory for command. */
5325 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5326 	if (tmp_buf == NULL) {
5327 		EL(ha, "failed, kmem_zalloc\n");
5328 		cmd->Status = EXT_STATUS_NO_MEMORY;
5329 		cmd->ResponseLen = 0;
5330 		return;
5331 	}
5332 
5333 	/* Send command */
5334 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5335 	    (caddr_t)tmp_buf);
5336 	if (rval != QL_SUCCESS) {
5337 		/* error */
5338 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5339 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5340 		cmd->Status = EXT_STATUS_ERR;
5341 		cmd->ResponseLen = 0;
5342 		return;
5343 	}
5344 
5345 	/* Copy the response */
5346 	if (ql_send_buffer_data((caddr_t)tmp_buf,
5347 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
5348 	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5349 		EL(ha, "failed, ddi_copyout\n");
5350 		cmd->Status = EXT_STATUS_COPY_ERR;
5351 		cmd->ResponseLen = 0;
5352 	} else {
5353 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5354 		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5355 	}
5356 
5357 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5358 }
5359 
5360 /*
5361  * ql_reset_statistics
 *	Performs EXT_SC_RST_STATISTICS subcommand of EXT_CC_SET_DATA.
5363  *
5364  * Input:
5365  *	ha:	adapter state pointer.
5366  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5367  *
5368  * Returns:
5369  *	None, request status indicated in cmd->Status.
5370  *
5371  * Context:
5372  *	Kernel context.
5373  */
static int
ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
{
	ql_xioctl_t		*xp = ha->xioctl;
	int			rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Reject the request while the driver is suspended. */
	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return (QL_FUNCTION_SUSPENDED);
	}

	/*
	 * Ask the firmware to clear its link status counters.  On
	 * failure the error is recorded in cmd, but the driver-side
	 * counters below are still cleared.
	 */
	rval = ql_reset_link_status(ha);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	}

	/* Zero the I/O counters under the task daemon lock. */
	TASK_DAEMON_LOCK(ha);
	xp->IosRequested = 0;
	xp->BytesRequested = 0;
	xp->IOInputRequests = 0;
	xp->IOOutputRequests = 0;
	xp->IOControlRequests = 0;
	xp->IOInputMByteCnt = 0;
	xp->IOOutputMByteCnt = 0;
	xp->IOOutputByteCnt = 0;
	xp->IOInputByteCnt = 0;
	TASK_DAEMON_UNLOCK(ha);

	/* Error/interrupt counters are guarded by the interrupt lock. */
	INTR_LOCK(ha);
	xp->ControllerErrorCount = 0;
	xp->DeviceErrorCount = 0;
	xp->TotalLipResets = 0;
	xp->TotalInterrupts = 0;
	INTR_UNLOCK(ha);

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (rval);
}
5420 
5421 /*
5422  * ql_get_statistics
 *	Performs EXT_SC_GET_STATISTICS subcommand of EXT_CC_GET_DATA.
5424  *
5425  * Input:
5426  *	ha:	adapter state pointer.
5427  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5428  *	mode:	flags.
5429  *
5430  * Returns:
5431  *	None, request status indicated in cmd->Status.
5432  *
5433  * Context:
5434  *	Kernel context.
5435  */
5436 static void
5437 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5438 {
5439 	EXT_HBA_PORT_STAT	ps = {0};
5440 	ql_link_stats_t		*ls;
5441 	int			rval;
5442 	ql_xioctl_t		*xp = ha->xioctl;
5443 	int			retry = 10;
5444 
5445 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5446 
5447 	while (ha->task_daemon_flags &
5448 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5449 		ql_delay(ha, 10000000);	/* 10 second delay */
5450 
5451 		retry--;
5452 
5453 		if (retry == 0) { /* effectively 100 seconds */
5454 			EL(ha, "failed, LOOP_NOT_READY\n");
5455 			cmd->Status = EXT_STATUS_BUSY;
5456 			cmd->ResponseLen = 0;
5457 			return;
5458 		}
5459 	}
5460 
5461 	/* Allocate memory for command. */
5462 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5463 	if (ls == NULL) {
5464 		EL(ha, "failed, kmem_zalloc\n");
5465 		cmd->Status = EXT_STATUS_NO_MEMORY;
5466 		cmd->ResponseLen = 0;
5467 		return;
5468 	}
5469 
5470 	/*
5471 	 * I think these are supposed to be port statistics
5472 	 * the loop ID or port ID should be in cmd->Instance.
5473 	 */
5474 	rval = ql_get_status_counts(ha, (uint16_t)
5475 	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
5476 	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
5477 	if (rval != QL_SUCCESS) {
5478 		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
5479 		    ha->loop_id);
5480 		cmd->Status = EXT_STATUS_MAILBOX;
5481 		cmd->DetailStatus = rval;
5482 		cmd->ResponseLen = 0;
5483 	} else {
5484 		ps.ControllerErrorCount = xp->ControllerErrorCount;
5485 		ps.DeviceErrorCount = xp->DeviceErrorCount;
5486 		ps.IoCount = (uint32_t)(xp->IOInputRequests +
5487 		    xp->IOOutputRequests + xp->IOControlRequests);
5488 		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
5489 		    xp->IOOutputMByteCnt);
5490 		ps.LipResetCount = xp->TotalLipResets;
5491 		ps.InterruptCount = xp->TotalInterrupts;
5492 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5493 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5494 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5495 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5496 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5497 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5498 
5499 		rval = ddi_copyout((void *)(uintptr_t)&ps,
5500 		    (void *)(uintptr_t)cmd->ResponseAdr,
5501 		    sizeof (EXT_HBA_PORT_STAT), mode);
5502 		if (rval != 0) {
5503 			EL(ha, "failed, ddi_copyout\n");
5504 			cmd->Status = EXT_STATUS_COPY_ERR;
5505 			cmd->ResponseLen = 0;
5506 		} else {
5507 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5508 		}
5509 	}
5510 
5511 	kmem_free(ls, sizeof (ql_link_stats_t));
5512 
5513 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5514 }
5515 
5516 /*
5517  * ql_get_statistics_fc
 *	Performs EXT_SC_GET_FC_STATISTICS subcommand of EXT_CC_GET_DATA.
5519  *
5520  * Input:
5521  *	ha:	adapter state pointer.
5522  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5523  *	mode:	flags.
5524  *
5525  * Returns:
5526  *	None, request status indicated in cmd->Status.
5527  *
5528  * Context:
5529  *	Kernel context.
5530  */
5531 static void
5532 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5533 {
5534 	EXT_HBA_PORT_STAT	ps = {0};
5535 	ql_link_stats_t		*ls;
5536 	int			rval;
5537 	uint16_t		qlnt;
5538 	EXT_DEST_ADDR		pextdestaddr;
5539 	uint8_t			*name;
5540 	ql_tgt_t		*tq = NULL;
5541 	int			retry = 10;
5542 
5543 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5544 
5545 	if (ddi_copyin((const void *)(uintptr_t)cmd->RequestAdr,
5546 	    (void *)&pextdestaddr,
5547 	    sizeof (EXT_DEST_ADDR), mode) != 0) {
5548 		EL(ha, "failed, ddi_copyin\n");
5549 		cmd->Status = EXT_STATUS_COPY_ERR;
5550 		cmd->ResponseLen = 0;
5551 		return;
5552 	}
5553 
5554 	qlnt = QLNT_PORT;
5555 	name = pextdestaddr.DestAddr.WWPN;
5556 
5557 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5558 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
5559 	    name[5], name[6], name[7]);
5560 
5561 	tq = ql_find_port(ha, name, qlnt);
5562 
5563 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5564 		EL(ha, "failed, fc_port not found\n");
5565 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5566 		cmd->ResponseLen = 0;
5567 		return;
5568 	}
5569 
5570 	while (ha->task_daemon_flags &
5571 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE  | DRIVER_STALL)) {
5572 		ql_delay(ha, 10000000);	/* 10 second delay */
5573 
5574 		retry--;
5575 
5576 		if (retry == 0) { /* effectively 100 seconds */
5577 			EL(ha, "failed, LOOP_NOT_READY\n");
5578 			cmd->Status = EXT_STATUS_BUSY;
5579 			cmd->ResponseLen = 0;
5580 			return;
5581 		}
5582 	}
5583 
5584 	/* Allocate memory for command. */
5585 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5586 	if (ls == NULL) {
5587 		EL(ha, "failed, kmem_zalloc\n");
5588 		cmd->Status = EXT_STATUS_NO_MEMORY;
5589 		cmd->ResponseLen = 0;
5590 		return;
5591 	}
5592 
5593 	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
5594 	    (caddr_t)ls, 0);
5595 	if (rval != QL_SUCCESS) {
5596 		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
5597 		    tq->d_id.b24);
5598 		cmd->Status = EXT_STATUS_MAILBOX;
5599 		cmd->DetailStatus = rval;
5600 		cmd->ResponseLen = 0;
5601 	} else {
5602 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5603 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5604 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5605 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5606 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5607 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5608 
5609 		rval = ddi_copyout((void *)(uintptr_t)&ps,
5610 		    (void *)(uintptr_t)cmd->ResponseAdr,
5611 		    sizeof (EXT_HBA_PORT_STAT), mode);
5612 
5613 		if (rval != 0) {
5614 			EL(ha, "failed, ddi_copyout\n");
5615 			cmd->Status = EXT_STATUS_COPY_ERR;
5616 			cmd->ResponseLen = 0;
5617 		} else {
5618 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5619 		}
5620 	}
5621 
5622 	kmem_free(ls, sizeof (ql_link_stats_t));
5623 
5624 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5625 }
5626 
5627 /*
5628  * ql_get_statistics_fc4
 *	Performs EXT_SC_GET_FC_STATISTICS subcommand of EXT_CC_GET_DATA.
5630  *
5631  * Input:
5632  *	ha:	adapter state pointer.
5633  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5634  *	mode:	flags.
5635  *
5636  * Returns:
5637  *	None, request status indicated in cmd->Status.
5638  *
5639  * Context:
5640  *	Kernel context.
5641  */
5642 static void
5643 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5644 {
5645 	uint32_t		rval;
5646 	EXT_HBA_FC4STATISTICS	fc4stats = {0};
5647 	ql_xioctl_t		*xp = ha->xioctl;
5648 
5649 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5650 
5651 	fc4stats.InputRequests = xp->IOInputRequests;
5652 	fc4stats.OutputRequests = xp->IOOutputRequests;
5653 	fc4stats.ControlRequests = xp->IOControlRequests;
5654 	fc4stats.InputMegabytes = xp->IOInputMByteCnt;
5655 	fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
5656 
5657 	rval = ddi_copyout((void *)&fc4stats,
5658 	    (void *)(uintptr_t)cmd->ResponseAdr,
5659 	    sizeof (EXT_HBA_FC4STATISTICS), mode);
5660 
5661 	if (rval != 0) {
5662 		EL(ha, "failed, ddi_copyout\n");
5663 		cmd->Status = EXT_STATUS_COPY_ERR;
5664 		cmd->ResponseLen = 0;
5665 	} else {
5666 		cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
5667 	}
5668 
5669 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5670 }
5671 
5672 /*
5673  * ql_set_led_state
5674  *	Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
5675  *
5676  * Input:
5677  *	ha:	adapter state pointer.
5678  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5679  *	mode:	flags.
5680  *
5681  * Returns:
5682  *	None, request status indicated in cmd->Status.
5683  *
5684  * Context:
5685  *	Kernel context.
5686  */
5687 static void
5688 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5689 {
5690 	EXT_BEACON_CONTROL	bstate;
5691 	uint32_t		rval;
5692 	ql_xioctl_t		*xp = ha->xioctl;
5693 
5694 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5695 
5696 	if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
5697 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5698 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5699 		EL(ha, "exiting - failed, RequestLen < EXT_BEACON_CONTROL,"
5700 		    " Len=%xh\n", cmd->RequestLen);
5701 		cmd->ResponseLen = 0;
5702 		return;
5703 	}
5704 
5705 	if (ha->device_id < 0x2300) {
5706 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5707 		cmd->DetailStatus = 0;
5708 		EL(ha, "exiting - failed, Invalid function for HBA model\n");
5709 		cmd->ResponseLen = 0;
5710 		return;
5711 	}
5712 
5713 	rval = ddi_copyin((void*)(uintptr_t)(cmd->RequestAdr), &bstate,
5714 	    cmd->RequestLen, mode);
5715 
5716 	if (rval != 0) {
5717 		cmd->Status = EXT_STATUS_COPY_ERR;
5718 		EL(ha, "exiting -  failed, ddi_copyin\n");
5719 		return;
5720 	}
5721 
5722 	switch (bstate.State) {
5723 	case EXT_DEF_GRN_BLINK_OFF:	/* turn beacon off */
5724 		if (xp->ledstate.BeaconState == BEACON_OFF) {
5725 			/* not quite an error -- LED state is already off */
5726 			cmd->Status = EXT_STATUS_OK;
5727 			EL(ha, "LED off request -- LED is already off\n");
5728 			break;
5729 		}
5730 
5731 		xp->ledstate.BeaconState = BEACON_OFF;
5732 		xp->ledstate.LEDflags = LED_ALL_OFF;
5733 
5734 		if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
5735 			cmd->Status = EXT_STATUS_MAILBOX;
5736 		} else {
5737 			cmd->Status = EXT_STATUS_OK;
5738 		}
5739 		break;
5740 
5741 	case EXT_DEF_GRN_BLINK_ON:	/* turn beacon on */
5742 		if (xp->ledstate.BeaconState == BEACON_ON) {
5743 			/* not quite an error -- LED state is already on */
5744 			cmd->Status = EXT_STATUS_OK;
5745 			EL(ha, "LED on request  - LED is already on\n");
5746 			break;
5747 		}
5748 
5749 		if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
5750 			cmd->Status = EXT_STATUS_MAILBOX;
5751 			break;
5752 		}
5753 
5754 		if (CFG_IST(ha, CFG_CTRL_2425)) {
5755 			xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
5756 		} else {
5757 			xp->ledstate.LEDflags = LED_GREEN;
5758 		}
5759 		xp->ledstate.BeaconState = BEACON_ON;
5760 
5761 		cmd->Status = EXT_STATUS_OK;
5762 		break;
5763 	default:
5764 		cmd->Status = EXT_STATUS_ERR;
5765 		EL(ha, "failed, unknown state request %xh\n", bstate.State);
5766 		break;
5767 	}
5768 
5769 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5770 }
5771 
5772 /*
5773  * ql_get_led_state
5774  *	Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
5775  *
5776  * Input:
5777  *	ha:	adapter state pointer.
5778  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5779  *	mode:	flags.
5780  *
5781  * Returns:
5782  *	None, request status indicated in cmd->Status.
5783  *
5784  * Context:
5785  *	Kernel context.
5786  */
5787 static void
5788 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5789 {
5790 	EXT_BEACON_CONTROL	bstate = {0};
5791 	uint32_t		rval;
5792 	ql_xioctl_t		*xp = ha->xioctl;
5793 
5794 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
5795 
5796 	if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
5797 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5798 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5799 		EL(ha, "exiting - failed, ResponseLen < EXT_BEACON_CONTROL,"
5800 		    "Len=%xh\n", cmd->ResponseLen);
5801 		cmd->ResponseLen = 0;
5802 		return;
5803 	}
5804 
5805 	if (ha->device_id < 0x2300) {
5806 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5807 		cmd->DetailStatus = 0;
5808 		EL(ha, "exiting - failed, Invalid function for HBA model\n");
5809 		cmd->ResponseLen = 0;
5810 		return;
5811 	}
5812 
5813 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
5814 		cmd->Status = EXT_STATUS_BUSY;
5815 		EL(ha, "exiting -  failed, isp abort active\n");
5816 		cmd->ResponseLen = 0;
5817 		return;
5818 	}
5819 
5820 	/* inform the user of the current beacon state (off or on) */
5821 	bstate.State = xp->ledstate.BeaconState;
5822 
5823 	rval = ddi_copyout((void *)&bstate,
5824 	    (void *)(uintptr_t)cmd->ResponseAdr,
5825 	    sizeof (EXT_BEACON_CONTROL), mode);
5826 
5827 	if (rval != 0) {
5828 		EL(ha, "failed, ddi_copyout\n");
5829 		cmd->Status = EXT_STATUS_COPY_ERR;
5830 		cmd->ResponseLen = 0;
5831 	} else {
5832 		cmd->Status = EXT_STATUS_OK;
5833 		cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
5834 	}
5835 
5836 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5837 }
5838 
5839 /*
5840  * ql_blink_led
5841  *	Determine the next state of the LED and drive it
5842  *
5843  * Input:
5844  *	ha:	adapter state pointer.
5845  *
5846  * Context:
5847  *	Interrupt context.
5848  */
5849 void
5850 ql_blink_led(ql_adapter_state_t *ha)
5851 {
5852 	uint32_t		nextstate;
5853 	ql_xioctl_t		*xp = ha->xioctl;
5854 
5855 	QL_PRINT_9(CE_CONT, "(%d): entering\n", ha->instance);
5856 
5857 	if (xp->ledstate.BeaconState == BEACON_ON) {
5858 		/* determine the next led state */
5859 		if (CFG_IST(ha, CFG_CTRL_2425)) {
5860 			nextstate = (xp->ledstate.LEDflags) &
5861 			    (~(RD32_IO_REG(ha, gpiod)));
5862 		} else {
5863 			nextstate = (xp->ledstate.LEDflags) &
5864 			    (~(RD16_IO_REG(ha, gpiod)));
5865 		}
5866 
5867 		/* turn the led on or off */
5868 		ql_drive_led(ha, nextstate);
5869 	}
5870 
5871 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5872 }
5873 
5874 /*
5875  * ql_drive_led
5876  *	drive the led's as determined by LEDflags
5877  *
5878  * Input:
5879  *	ha:		adapter state pointer.
5880  *	LEDflags:	LED flags
5881  *
5882  * Context:
5883  *	Kernel/Interrupt context.
5884  */
static void
ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
{

	QL_PRINT_9(CE_CONT, "(%d): entering\n", ha->instance);

	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {

		uint16_t	gpio_enable, gpio_data;

		/* setup to send new data: enable the LED GPIO lines */
		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
		WRT16_IO_REG(ha, gpioe, gpio_enable);

		/* read current data and clear out old led data */
		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);

		/* set in the new led data. */
		gpio_data = (uint16_t)(gpio_data | LEDflags);

		/* write out the new led data */
		WRT16_IO_REG(ha, gpiod, gpio_data);

	} else if (CFG_IST(ha, CFG_CTRL_2425)) {

		uint32_t	gpio_data;

		/*
		 * setup to send new data: on 24xx/25xx the update-enable
		 * bits live in the data register itself
		 */
		gpio_data = RD32_IO_REG(ha, gpiod);
		gpio_data |= LED_MASK_UPDATE_24;
		WRT32_IO_REG(ha, gpiod, gpio_data);

		/* read current data and clear out old led data */
		gpio_data = RD32_IO_REG(ha, gpiod);
		gpio_data &= ~LED_MASK_COLORS_24;

		/* set in the new led data */
		gpio_data |= LEDflags;

		/* write out the new led data */
		WRT32_IO_REG(ha, gpiod, gpio_data);

	} else {
		/* older adapters have no driver-controllable LEDs */
		EL(ha, "unsupported HBA: %xh", ha->device_id);
	}

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
}
5935 
5936 /*
5937  * ql_setup_led
5938  *	Setup LED for driver control
5939  *
5940  * Input:
5941  *	ha:	adapter state pointer.
5942  *
5943  * Context:
5944  *	Kernel/Interrupt context.
5945  */
5946 static uint32_t
5947 ql_setup_led(ql_adapter_state_t *ha)
5948 {
5949 	uint32_t	rval;
5950 	ql_mbx_data_t	mr;
5951 
5952 	QL_PRINT_9(CE_CONT, "(%d): entering\n", ha->instance);
5953 
5954 	/* decouple the LED control from the fw */
5955 	rval = ql_get_firmware_option(ha, &mr);
5956 	if (rval != QL_SUCCESS) {
5957 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
5958 		return (rval);
5959 	}
5960 
5961 	/* set the appropriate options */
5962 	mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
5963 
5964 	/* send it back to the firmware */
5965 	rval = ql_set_firmware_option(ha, &mr);
5966 	if (rval != QL_SUCCESS) {
5967 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
5968 		return (rval);
5969 	}
5970 
5971 	/* initally, turn the LED's off */
5972 	ql_drive_led(ha, LED_ALL_OFF);
5973 
5974 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
5975 
5976 	return (rval);
5977 }
5978 
5979 /*
5980  * ql_wrapup_led
5981  *	Return LED control to the firmware
5982  *
5983  * Input:
5984  *	ha:	adapter state pointer.
5985  *
5986  * Context:
5987  *	Kernel/Interrupt context.
5988  */
5989 static uint32_t
5990 ql_wrapup_led(ql_adapter_state_t *ha)
5991 {
5992 	uint32_t	rval;
5993 	ql_mbx_data_t	mr;
5994 
5995 	QL_PRINT_9(CE_CONT, "(%d): entering\n", ha->instance);
5996 
5997 	/* Turn all LED's off */
5998 	ql_drive_led(ha, LED_ALL_OFF);
5999 
6000 	if (CFG_IST(ha, CFG_CTRL_2425)) {
6001 
6002 		uint32_t	gpio_data;
6003 
6004 		/* disable the LED update mask */
6005 		gpio_data = RD32_IO_REG(ha, gpiod);
6006 		gpio_data &= ~LED_MASK_UPDATE_24;
6007 
6008 		/* write out the data */
6009 		WRT32_IO_REG(ha, gpiod, gpio_data);
6010 	}
6011 
6012 	/* give LED control back to the f/w */
6013 	rval = ql_get_firmware_option(ha, &mr);
6014 	if (rval != QL_SUCCESS) {
6015 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6016 		return (rval);
6017 	}
6018 
6019 	mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6020 
6021 	rval = ql_set_firmware_option(ha, &mr);
6022 	if (rval != QL_SUCCESS) {
6023 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6024 		return (rval);
6025 	}
6026 
6027 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
6028 
6029 	return (rval);
6030 }
6031 
6032 /*
6033  * ql_get_port_summary
 *	Performs EXT_SC_GET_PORT_SUMMARY subcommand of EXT_CC_GET_DATA.
6035  *
6036  *	The EXT_IOCTL->RequestAdr points to a single
6037  *	UINT32 which identifies the device type.
6038  *
6039  * Input:
6040  *	ha:	adapter state pointer.
6041  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6042  *	mode:	flags.
6043  *
6044  * Returns:
6045  *	None, request status indicated in cmd->Status.
6046  *
6047  * Context:
6048  *	Kernel context.
6049  */
static void
ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DEVICEDATA		dd = {0};
	EXT_DEVICEDATA		*uddp;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint32_t		rlen, dev_type, index;
	int			rval = 0;
	EXT_DEVICEDATAENTRY	*uddep, *ddep;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* ddep is the kernel staging entry, copied out one at a time. */
	ddep = &dd.EntryList[0];

	/*
	 * Get the type of device the requestor is looking for.
	 *
	 * We ignore this for now.
	 */
	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&dev_type,
	    sizeof (dev_type), mode);
	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyin\n");
		return;
	}
	/*
	 * Count the number of entries to be returned. Count devices
	 * that are offline, but have been persistently bound.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			/* Initiators and invalid loop IDs are skipped. */
			if (tq->flags & TQF_INITIATOR_DEVICE ||
			    !VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;	/* Skip this one */
			}
			dd.TotalDevices++;
		}
	}
	/*
	 * Compute the number of entries that can be returned
	 * based upon the size of caller's response buffer.
	 * EXT_DEVICEDATA already contains one EXT_DEVICEDATAENTRY,
	 * hence the (TotalDevices - 1) adjustment.
	 */
	dd.ReturnListEntryCount = 0;
	if (dd.TotalDevices == 0) {
		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
	} else {
		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
	}
	if (rlen > cmd->ResponseLen) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = rlen;
		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
		    rlen, cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}
	cmd->ResponseLen = 0;
	/* uddp/uddep are user-space addresses; only valid for copyout. */
	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
	uddep = &uddp->EntryList[0];
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			if (tq->flags & TQF_INITIATOR_DEVICE ||
			    !VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;	/* Skip this one */
			}

			/* Build one entry in the kernel staging buffer. */
			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));

			bcopy(tq->node_name, ddep->NodeWWN, 8);
			bcopy(tq->port_name, ddep->PortWWN, 8);

			ddep->PortID[0] = tq->d_id.b.domain;
			ddep->PortID[1] = tq->d_id.b.area;
			ddep->PortID[2] = tq->d_id.b.al_pa;

			bcopy(tq->port_name,
			    (caddr_t)&ddep->TargetAddress.Target, 8);

			ddep->DeviceFlags = tq->flags;
			ddep->LoopID = tq->loop_id;
			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
			    ha->instance, ddep->TargetAddress.Target,
			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
			    ddep->NodeWWN[2], ddep->NodeWWN[3],
			    ddep->NodeWWN[4], ddep->NodeWWN[5],
			    ddep->NodeWWN[6], ddep->NodeWWN[7],
			    ddep->PortWWN[0], ddep->PortWWN[1],
			    ddep->PortWWN[2], ddep->PortWWN[3],
			    ddep->PortWWN[4], ddep->PortWWN[5],
			    ddep->PortWWN[6], ddep->PortWWN[7]);
			/* Copy the entry to the user's EntryList slot. */
			rval = ddi_copyout((void *)ddep, (void *)uddep,
			    sizeof (EXT_DEVICEDATAENTRY), mode);

			if (rval != 0) {
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->ResponseLen = 0;
				EL(ha, "failed, ddi_copyout\n");
				break;
			}
			dd.ReturnListEntryCount++;
			uddep++;
			cmd->ResponseLen += (uint32_t)
			    sizeof (EXT_DEVICEDATAENTRY);
		}
	}
	/* Finally copy out the header (counts), which precedes EntryList. */
	rval = ddi_copyout((void *)&dd, (void *)uddp,
	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);

	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout-2\n");
	} else {
		/*
		 * NOTE(review): this adds a full entry size for the header
		 * although only sizeof (EXT_DEVICEDATA) -
		 * sizeof (EXT_DEVICEDATAENTRY) bytes were copied out above
		 * -- verify against the consumers of ResponseLen.
		 */
		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
		QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
	}
}
6178 
6179 /*
6180  * ql_get_target_id
 *	Performs EXT_SC_GET_TARGET_ID subcommand of EXT_CC_GET_DATA.
6182  *
6183  * Input:
6184  *	ha:	adapter state pointer.
6185  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6186  *	mode:	flags.
6187  *
6188  * Returns:
6189  *	None, request status indicated in cmd->Status.
6190  *
6191  * Context:
6192  *	Kernel context.
6193  */
6194 static void
6195 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6196 {
6197 	uint32_t		rval;
6198 	uint16_t		qlnt;
6199 	EXT_DEST_ADDR		extdestaddr = {0};
6200 	uint8_t			*name;
6201 	uint8_t			wwpn[EXT_DEF_WWN_NAME_SIZE];
6202 	ql_tgt_t		*tq;
6203 
6204 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
6205 
6206 	if (ddi_copyin((const void *)(uintptr_t)cmd->RequestAdr, wwpn,
6207 	    sizeof (EXT_DEST_ADDR), mode) != 0) {
6208 		EL(ha, "failed, ddi_copyin\n");
6209 		cmd->Status = EXT_STATUS_COPY_ERR;
6210 		cmd->ResponseLen = 0;
6211 		return;
6212 	}
6213 
6214 	qlnt = QLNT_PORT;
6215 	name = wwpn;
6216 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6217 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
6218 	    name[5], name[6], name[7]);
6219 
6220 	tq = ql_find_port(ha, name, qlnt);
6221 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6222 		EL(ha, "failed, fc_port not found\n");
6223 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6224 		cmd->ResponseLen = 0;
6225 		return;
6226 	}
6227 
6228 	bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6229 
6230 	rval = ddi_copyout((void *)&extdestaddr,
6231 	    (void *)(uintptr_t)cmd->ResponseAdr,
6232 	    sizeof (EXT_DEST_ADDR), mode);
6233 	if (rval != 0) {
6234 		EL(ha, "failed, ddi_copyout\n");
6235 		cmd->Status = EXT_STATUS_COPY_ERR;
6236 		cmd->ResponseLen = 0;
6237 	}
6238 
6239 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
6240 }
6241 
6242 /*
6243  * ql_setup_fcache
6244  *	Populates selected flash sections into the cache
6245  *
6246  * Input:
6247  *	ha = adapter state pointer.
6248  *
6249  * Returns:
6250  *
6251  * Context:
6252  *	Kernel context.
6253  *
6254  * Note:
6255  *	Driver must be in stalled state prior to entering or
6256  *	add code to this function prior to calling ql_setup_flash()
6257  */
void
ql_setup_fcache(ql_adapter_state_t *ha)
{
	int		rval;
	uint32_t	freadpos = 0;	/* flash read offset; 0xffffffff = done */
	uint32_t	fw_done = 0;	/* nonzero once fw section was handled */
	ql_fcache_t	*head = NULL;	/* local list; published on success */
	ql_fcache_t	*tail = NULL;
	ql_fcache_t	*ftmp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	CACHE_LOCK(ha);

	/* If we already have populated it, rtn */
	if (ha->fcache != NULL) {
		CACHE_UNLOCK(ha);
		EL(ha, "buffer already populated\n");
		return;
	}

	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
		CACHE_UNLOCK(ha);
		EL(ha, "unable to setup flash; rval=%xh\n", rval);
		return;
	}

	/*
	 * Read one FBUFSIZE chunk per flash image section.  ql_check_pci()
	 * classifies each chunk and advances freadpos, setting it to
	 * 0xffffffff after the last image.
	 */
	while (freadpos != 0xffffffff) {

		/* Allocate & populate this node */

		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
			EL(ha, "node alloc failed\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* link in the new node */
		if (head == NULL) {
			head = tail = ftmp;
		} else {
			tail->next = ftmp;
			tail = ftmp;
		}

		/* Do the firmware node first for 24xx/25xx's */
		if (fw_done == 0) {
			if (CFG_IST(ha, CFG_CTRL_2425)) {
				freadpos = FLASH_24XX_FIRMWARE_ADDR;
			}
			fw_done = 1;
		}

		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
		    freadpos)) != QL_SUCCESS) {
			EL(ha, "failed, 24xx dump_fcode"
			    " pos=%xh rval=%xh\n", freadpos, rval);
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* checkout the pci data / format */
		if (ql_check_pci(ha, ftmp, &freadpos)) {
			EL(ha, "flash header incorrect\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}
	}

	if (rval != QL_SUCCESS) {
		/* release all resources we have */
		ftmp = head;
		while (ftmp != NULL) {
			tail = ftmp->next;
			kmem_free(ftmp->buf, FBUFSIZE);
			kmem_free(ftmp, sizeof (ql_fcache_t));
			ftmp = tail;
		}

		EL(ha, "failed, exiting\n");
	} else {
		/* Publish the completed list while still holding the lock. */
		ha->fcache = head;
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	CACHE_UNLOCK(ha);
}
6344 
6345 /*
6346  * ql_update_fcache
6347  *	re-populates updated flash into the fcache. If
6348  *	fcache does not exist (e.g., flash was empty/invalid on
6349  *	boot), this routine will create and the populate it.
6350  *
6351  * Input:
6352  *	ha	= adapter state pointer.
6353  *	*bpf 	= Pointer to flash buffer.
6354  *	bsize	= Size of flash buffer.
6355  *
6356  * Returns:
6357  *
6358  * Context:
6359  *	Kernel context.
6360  */
static void
ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
{
	int		rval = QL_SUCCESS;
	uint32_t	freadpos = 0;	/* offset into bfp; 0xffffffff = done */
	uint32_t	fw_done = 0;	/* nonzero once fw section was handled */
	ql_fcache_t	*head = NULL;	/* new list; swapped in on success */
	ql_fcache_t	*tail = NULL;
	ql_fcache_t	*ftmp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Walk the caller-supplied flash image one FBUFSIZE chunk per
	 * section.  ql_check_pci() classifies each chunk and advances
	 * freadpos, setting it to 0xffffffff after the last image.
	 */
	while (freadpos != 0xffffffff) {

		/* Allocate & populate this node */

		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
			EL(ha, "node alloc failed\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* link in the new node */
		if (head == NULL) {
			head = tail = ftmp;
		} else {
			tail->next = ftmp;
			tail = ftmp;
		}

		/* Do the firmware node first for 24xx's */
		if (fw_done == 0) {
			if (CFG_IST(ha, CFG_CTRL_2425)) {
				freadpos = FLASH_24XX_FIRMWARE_ADDR;
			}
			fw_done = 1;
		}

		/* read in first FBUFSIZE bytes of this flash section */
		if (freadpos+FBUFSIZE > bsize) {
			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
			    freadpos, bsize);
			rval = QL_FUNCTION_FAILED;
			break;
		}
		bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);

		/* checkout the pci data / format */
		if (ql_check_pci(ha, ftmp, &freadpos)) {
			EL(ha, "flash header incorrect\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}
	}

	if (rval != QL_SUCCESS) {
		/*
		 * release all resources we have
		 */
		ql_fcache_rel(head);
		EL(ha, "failed, exiting\n");
	} else {
		/*
		 * Release previous fcache resources and update with new
		 */
		CACHE_LOCK(ha);
		ql_fcache_rel(ha->fcache);
		ha->fcache = head;
		CACHE_UNLOCK(ha);

		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
}
6434 
6435 /*
6436  * ql_setup_fnode
6437  *	Allocates fcache node
6438  *
6439  * Input:
6440  *	ha = adapter state pointer.
6441  *	node = point to allocated fcache node (NULL = failed)
6442  *
6443  * Returns:
6444  *
6445  * Context:
6446  *	Kernel context.
6447  *
6448  * Note:
6449  *	Driver must be in stalled state prior to entering or
6450  *	add code to this function prior to calling ql_setup_flash()
6451  */
6452 static ql_fcache_t *
6453 ql_setup_fnode(ql_adapter_state_t *ha)
6454 {
6455 	ql_fcache_t	*fnode = NULL;
6456 
6457 	if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
6458 	    KM_SLEEP))) == NULL) {
6459 		EL(ha, "fnode alloc failed\n");
6460 		fnode = NULL;
6461 	} else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
6462 	    KM_SLEEP))) == NULL) {
6463 		EL(ha, "buf alloc failed\n");
6464 		kmem_free(fnode, sizeof (ql_fcache_t));
6465 		fnode = NULL;
6466 	} else {
6467 		fnode->buflen = FBUFSIZE;
6468 	}
6469 
6470 	return (fnode);
6471 }
6472 
6473 /*
6474  * ql_fcache_rel
6475  *	Releases the fcache resources
6476  *
6477  * Input:
6478  *	ha	= adapter state pointer.
6479  *	head	= Pointer to fcache linked list
6480  *
6481  * Returns:
6482  *
6483  * Context:
6484  *	Kernel context.
6485  *
6486  */
6487 void
6488 ql_fcache_rel(ql_fcache_t *head)
6489 {
6490 	ql_fcache_t	*ftmp = head;
6491 	ql_fcache_t	*tail;
6492 
6493 	/* release all resources we have */
6494 	while (ftmp != NULL) {
6495 		tail = ftmp->next;
6496 		kmem_free(ftmp->buf, FBUFSIZE);
6497 		kmem_free(ftmp, sizeof (ql_fcache_t));
6498 		ftmp = tail;
6499 	}
6500 }
6501 
6502 /*
6503  * ql_get_fbuf
6504  *	Search the fcache list for the type specified
6505  *
6506  * Input:
6507  *	fptr	= Pointer to fcache linked list
6508  *	ftype	= Type of image to be returned.
6509  *
6510  * Returns:
6511  *	Pointer to ql_fcache_t.
6512  *	NULL means not found.
6513  *
6514  * Context:
6515  *	Kernel context.
6516  *
6517  *
6518  */
6519 ql_fcache_t *
6520 ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
6521 {
6522 	while (fptr != NULL) {
6523 		/* does this image meet criteria? */
6524 		if (ftype & fptr->type) {
6525 			break;
6526 		}
6527 		fptr = fptr->next;
6528 	}
6529 	return (fptr);
6530 }
6531 
6532 /*
6533  * ql_check_pci
6534  *
6535  *	checks the passed buffer for a valid pci signature and
6536  *	expected (and in range) pci length values.
6537  *
6538  *	For firmware type, a pci header is added since the image in
6539  *	the flash does not have one (!!!).
6540  *
6541  *	On successful pci check, nextpos adjusted to next pci header.
6542  *
6543  * Returns:
6544  *	-1 --> last pci image
6545  *	0 --> pci header valid
6546  *	1 --> pci header invalid.
6547  *
6548  * Context:
6549  *	Kernel context.
6550  */
static int
ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
{
	pci_header_t	*pcih;
	pci_data_t	*pcid;
	uint32_t	doff;	/* offset of the "PCIR" data structure */
	uint8_t		*pciinfo;

	if (fcache != NULL) {
		pciinfo = fcache->buf;
	} else {
		EL(ha, "failed, null fcache ptr passed\n");
		return (1);
	}

	if (pciinfo == NULL) {
		EL(ha, "failed, null pciinfo ptr passed\n");
		return (1);
	}

	/*
	 * Sbus fcode images carry no pci header; identify them by the
	 * fcode signature byte and take the version string from the
	 * "version" device property instead.
	 */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		caddr_t	bufp;
		uint_t	len;

		if (pciinfo[0] != SBUS_CODE_FCODE) {
			EL(ha, "failed, unable to detect sbus fcode\n");
			return (1);
		}
		fcache->type = FTYPE_FCODE;

		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
		    (int *)&len) == DDI_PROP_SUCCESS) {

			(void) snprintf(fcache->verstr,
			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
			kmem_free(bufp, len);
		}

		/* Sbus flash holds a single image. */
		*nextpos = 0xffffffff;
		return (0);
	}

	if (*nextpos == FLASH_24XX_FIRMWARE_ADDR) {

		pci_header_t	fwh = {0};
		pci_data_t	fwd = {0};
		uint8_t		*buf, *bufp;

		/*
		 * Build a pci header for the firmware module
		 */
		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
		    NULL) {
			EL(ha, "failed, unable to allocate buffer\n");
			return (1);
		}

		fwh.signature[0] = PCI_HEADER0;
		fwh.signature[1] = PCI_HEADER1;
		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));

		fwd.signature[0] = 'P';
		fwd.signature[1] = 'C';
		fwd.signature[2] = 'I';
		fwd.signature[3] = 'R';
		fwd.codetype = PCI_CODE_FW;
		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));

		/* Prepend the synthetic header + data struct to the image. */
		bufp = buf;
		bcopy(&fwh, bufp, sizeof (pci_header_t));
		bufp += sizeof (pci_header_t);
		bcopy(&fwd, bufp, sizeof (pci_data_t));
		bufp += sizeof (pci_data_t);

		/*
		 * NOTE: the tail of the cached fw chunk is truncated by
		 * the size of the prepended header + data structures.
		 */
		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
		    sizeof (pci_data_t)));
		bcopy(buf, fcache->buf, FBUFSIZE);

		fcache->type = FTYPE_FW;

		/* TODO: check offsets are correct! */

		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
		    "%d.%d.%d", fcache->buf[19], fcache->buf[23],
		    fcache->buf[27]);

		/* Continue with the boot-code images at flash offset 0. */
		*nextpos = 0;
		kmem_free(buf, FBUFSIZE);
		return (0);
	}

	/* get to the pci header image length */
	pcih = (pci_header_t *)pciinfo;

	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);

	/* some header section sanity check */
	if (pcih->signature[0] != PCI_HEADER0 ||
	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
		    pcih->signature[0], pcih->signature[1], doff);
		return (1);
	}

	pcid = (pci_data_t *)(pciinfo + doff);

	/* a slight sanity data section check */
	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
		EL(ha, "failed, data sig mismatch!\n");
		return (1);
	}

	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
		EL(ha, "last image\n");
		*nextpos = 0xffffffff;
	} else {
		/* adjust the next flash read start position */
		*nextpos += (pcid->imagelength[0] |
		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
	}

	/* Map the PCI data structure code type to the driver's image type. */
	switch (pcid->codetype) {
	case PCI_CODE_X86PC:
		fcache->type = FTYPE_BIOS;
		break;
	case PCI_CODE_FCODE:
		fcache->type = FTYPE_FCODE;
		break;
	case PCI_CODE_EFI:
		fcache->type = FTYPE_EFI;
		break;
	case PCI_CODE_HPPA:
		fcache->type = FTYPE_HPPA;
		break;
	default:
		fcache->type = FTYPE_UNKNOWN;
		break;
	}

	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
	    "%d.%d", pcid->revisionlevel[1], pcid->revisionlevel[0]);

	return (0);
}
6701 
6702 /*
6703  * ql_get_sfp
6704  *	Returns sfp data to sdmapi caller
6705  *
6706  * Input:
6707  *	ha:	adapter state pointer.
6708  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6709  *	mode:	flags.
6710  *
6711  * Returns:
6712  *	None, request status indicated in cmd->Status.
6713  *
6714  * Context:
6715  *	Kernel context.
6716  */
6717 static void
6718 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6719 {
6720 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
6721 
6722 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
6723 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
6724 		EL(ha, "failed, invalid request for HBA\n");
6725 		return;
6726 	}
6727 
6728 	if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
6729 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6730 		cmd->DetailStatus = QL_24XX_SFP_SIZE;
6731 		EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
6732 		    cmd->ResponseLen);
6733 		return;
6734 	}
6735 
6736 	/* Dump SFP data in user buffer */
6737 	if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
6738 	    mode)) != 0) {
6739 		cmd->Status = EXT_STATUS_COPY_ERR;
6740 		EL(ha, "failed, copy error\n");
6741 	} else {
6742 		cmd->Status = EXT_STATUS_OK;
6743 	}
6744 
6745 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
6746 }
6747 
6748 /*
6749  * ql_dump_sfp
6750  *	Dumps SFP.
6751  *
6752  * Input:
6753  *	ha:	adapter state pointer.
6754  *	bp:	buffer address.
6755  *	mode:	flags
6756  *
6757  * Returns:
6758  *
6759  * Context:
6760  *	Kernel context.
6761  */
static int
ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
{
	dma_mem_t	mem;
	uint32_t	cnt;
	int		rval2, rval = 0;
	uint32_t	dxfer;	/* bytes actually copied out to the app */

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Get memory for SFP. */

	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
	    MEM_DATA_ALIGN)) != QL_SUCCESS) {
		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
		return (ENOMEM);
	}

	/*
	 * Read the SFP in mem.size byte pieces.  Bytes below 256 are
	 * read from i2c device 0xA0, the remainder from 0xA2;
	 * (cnt & 0xff) is the byte offset within the selected device.
	 */
	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
		rval2 = ql_read_sfp(ha, &mem,
		    (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
		    (uint16_t)(cnt & 0xff));
		if (rval2 != QL_SUCCESS) {
			EL(ha, "failed, read_sfp=%xh\n", rval2);
			rval = EFAULT;
			break;
		}

		/* copy the data back */
		if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
		    mode)) != mem.size) {
			/* ddi copy error */
			EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
			rval = EFAULT;
			break;
		}

		/* adjust the buffer pointer */
		bp = (caddr_t)bp + mem.size;
	}

	/* Free the DMA buffer regardless of outcome. */
	ql_free_phys(ha, &mem);

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (rval);
}
6809 
6810 /*
6811  * ql_port_param
6812  *	Retrieves or sets the firmware port speed settings
6813  *
6814  * Input:
6815  *	ha:	adapter state pointer.
6816  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6817  *	mode:	flags.
6818  *
6819  * Returns:
6820  *	None, request status indicated in cmd->Status.
6821  *
6822  * Context:
6823  *	Kernel context.
6824  *
6825  */
static void
ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint8_t			*name;
	ql_tgt_t		*tq;
	EXT_PORT_PARAM		port_param = {0};
	uint32_t		rval = QL_SUCCESS;
	uint32_t		idma_rate;	/* firmware iiDMA rate code */

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* iiDMA is only available on 24xx/25xx adapters. */
	if (CFG_IST(ha, CFG_CTRL_2425) == 0) {
		EL(ha, "invalid request for this HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	if (LOOP_NOT_READY(ha)) {
		EL(ha, "failed, loop not ready\n");
		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
		cmd->ResponseLen = 0;
		return;
	}

	if (ddi_copyin((const void *)(uintptr_t)cmd->RequestAdr,
	    (void*)&port_param,
	    sizeof (EXT_PORT_PARAM), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Only WWPN-based destination lookup is supported. */
	if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
		EL(ha, "Unsupported dest lookup type: %xh\n",
		    port_param.FCScsiAddr.DestType);
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		return;
	}

	name = port_param.FCScsiAddr.DestAddr.WWPN;

	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
	    ha->instance, name[0], name[1], name[2], name[3], name[4],
	    name[5], name[6], name[7]);

	tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
		EL(ha, "failed, fc_port not found\n");
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		return;
	}

	cmd->Status = EXT_STATUS_OK;
	cmd->DetailStatus = EXT_STATUS_OK;

	switch (port_param.Mode) {
	case EXT_IIDMA_MODE_GET:
		/*
		 * Report the firmware's port rate for the wwpn
		 */
		rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
		    port_param.Mode);

		if (rval != QL_SUCCESS) {
			EL(ha, "iidma get failed: %xh\n", rval);
			cmd->Status = EXT_STATUS_MAILBOX;
			cmd->DetailStatus = rval;
			cmd->ResponseLen = 0;
		} else {
			/* Translate firmware rate code to the EXT speed. */
			switch (idma_rate) {
			case IIDMA_RATE_1GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_1GBIT;
				break;
			case IIDMA_RATE_2GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_2GBIT;
				break;
			case IIDMA_RATE_4GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_4GBIT;
				break;
			case IIDMA_RATE_8GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_8GBIT;
				break;
			default:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_UNKNOWN;
				EL(ha, "failed, Port speed rate=%xh\n",
				    idma_rate);
				break;
			}

			/* Copy back the data */
			rval = ddi_copyout((void *)&port_param,
			    (void *)(uintptr_t)cmd->ResponseAdr,
			    sizeof (EXT_PORT_PARAM),
			    mode);

			if (rval != 0) {
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->ResponseLen = 0;
				EL(ha, "failed, ddi_copyout\n");
			} else {
				cmd->ResponseLen = (uint32_t)
				    sizeof (EXT_PORT_PARAM);
			}
		}
		break;

	case EXT_IIDMA_MODE_SET:
		/*
		 * Set the firmware's port rate for the wwpn
		 */
		switch (port_param.Speed) {
		case EXT_DEF_PORTSPEED_1GBIT:
			idma_rate = IIDMA_RATE_1GB;
			break;
		case EXT_DEF_PORTSPEED_2GBIT:
			idma_rate = IIDMA_RATE_2GB;
			break;
		case EXT_DEF_PORTSPEED_4GBIT:
			idma_rate = IIDMA_RATE_4GB;
			break;
		case EXT_DEF_PORTSPEED_8GBIT:
			idma_rate = IIDMA_RATE_8GB;
			break;
		default:
			EL(ha, "invalid set iidma rate: %x\n",
			    port_param.Speed);
			cmd->Status = EXT_STATUS_INVALID_PARAM;
			cmd->ResponseLen = 0;
			rval = QL_PARAMETER_ERROR;
			break;
		}

		/* Only issue the mailbox command if the speed mapped. */
		if (rval == QL_SUCCESS) {
			rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
			    port_param.Mode);
			if (rval != QL_SUCCESS) {
				EL(ha, "iidma set failed: %xh\n", rval);
				cmd->Status = EXT_STATUS_MAILBOX;
				cmd->DetailStatus = rval;
				cmd->ResponseLen = 0;
			}
		}
		break;
	default:
		EL(ha, "invalid mode specified: %x\n", port_param.Mode);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->ResponseLen = 0;
		cmd->DetailStatus = 0;
		break;
	}

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
}
6988 
6989 /*
6990  * ql_get_fwexttrace
6991  *	Dumps f/w extended trace buffer
6992  *
6993  * Input:
6994  *	ha:	adapter state pointer.
6995  *	bp:	buffer address.
6996  *	mode:	flags
6997  *
6998  * Returns:
6999  *
7000  * Context:
7001  *	Kernel context.
7002  */
7003 /* ARGSUSED */
7004 static void
7005 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7006 {
7007 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7008 
7009 	if (CFG_IST(ha, CFG_CTRL_2425) == 0) {
7010 		EL(ha, "invalid request for this HBA\n");
7011 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7012 		cmd->ResponseLen = 0;
7013 		return;
7014 	}
7015 
7016 	if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
7017 	    (ha->fwexttracebuf.bp == NULL)) {
7018 		EL(ha, "f/w extrended trace is not enabled\n");
7019 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7020 		cmd->ResponseLen = 0;
7021 		return;
7022 	}
7023 }
7024 
7025 /*
7026  * ql_get_fwfcetrace
7027  *	Dumps f/w fibre channel trace buffer
7028  *
7029  * Input:
7030  *	ha:	adapter state pointer.
7031  *	bp:	buffer address.
7032  *	mode:	flags
7033  *
7034  * Returns:
7035  *
7036  * Context:
7037  *	Kernel context.
7038  */
7039 /* ARGSUSED */
7040 static void
7041 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7042 {
7043 
7044 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7045 
7046 	if (CFG_IST(ha, CFG_CTRL_2425) == 0) {
7047 		EL(ha, "invalid request for this HBA\n");
7048 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7049 		cmd->ResponseLen = 0;
7050 		return;
7051 	}
7052 
7053 	if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
7054 	    (ha->fwfcetracebuf.bp == NULL)) {
7055 		EL(ha, "f/w FCE trace is not enabled\n");
7056 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7057 		cmd->ResponseLen = 0;
7058 		return;
7059 	}
7060 }
7061 
7062 /*
7063  * ql_get_pci_data
7064  *	Retrieves pci config space data
7065  *
7066  * Input:
7067  *	ha:	adapter state pointer.
7068  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7069  *	mode:	flags.
7070  *
7071  * Returns:
7072  *	None, request status indicated in cmd->Status.
7073  *
7074  * Context:
7075  *	Kernel context.
7076  *
7077  */
static void
ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint8_t		cap_ptr;
	uint8_t		cap_id;
	uint32_t	buf_size = 256;	/* conventional config space size */

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/*
	 * First check the "Capabilities List" bit of the status register.
	 */
	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
		/*
		 * Now get the capability pointer
		 */
		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
			/*
			 * Check for the pcie capability.
			 */
			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
			if (cap_id == PCI_CAP_ID_PCI_E) {
				/* PCIe device: dump 4KB extended space. */
				buf_size = 4096;
				break;
			}
			/* Follow the linked list of capabilities. */
			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
			    (cap_ptr + PCI_CAP_NEXT_PTR));
		}
	}

	if (cmd->ResponseLen < buf_size) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = buf_size;	/* required buffer size */
		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
		    cmd->ResponseLen);
		return;
	}

	/* Dump PCI config data. */
	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
	    buf_size, mode))
	    != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->DetailStatus = 0;
		EL(ha, "failed, copy err pci_dump\n");
	} else {
		cmd->Status = EXT_STATUS_OK;
		cmd->DetailStatus = buf_size;	/* bytes returned */
	}

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
}
7131 
7132 /*
7133  * ql_pci_dump
7134  *	Dumps PCI config data to application buffer.
7135  *
7136  * Input:
7137  *	ha = adapter state pointer.
7138  *	bp = user buffer address.
7139  *
7140  * Returns:
7141  *
7142  * Context:
7143  *	Kernel context.
7144  */
7145 int
7146 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
7147 {
7148 	uint32_t	pci_os;
7149 	uint32_t	*ptr32, *org_ptr32;
7150 
7151 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7152 
7153 	ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
7154 	if (ptr32 == NULL) {
7155 		EL(ha, "failed kmem_zalloc\n");
7156 		return (ENOMEM);
7157 	}
7158 
7159 	/* store the initial value of ptr32 */
7160 	org_ptr32 = ptr32;
7161 	for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
7162 		*ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
7163 		LITTLE_ENDIAN_32(ptr32);
7164 		ptr32++;
7165 	}
7166 
7167 	if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
7168 	    0) {
7169 		EL(ha, "failed ddi_copyout\n");
7170 		kmem_free(org_ptr32, pci_size);
7171 		return (EFAULT);
7172 	}
7173 
7174 	QL_DUMP_9(org_ptr32, 8, pci_size);
7175 
7176 	kmem_free(org_ptr32, pci_size);
7177 
7178 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7179 
7180 	return (0);
7181 }
7182 
7183 /*
7184  * ql_menlo_reset
7185  *	Reset Menlo
7186  *
7187  * Input:
7188  *	ha:	adapter state pointer.
7189  *	bp:	buffer address.
7190  *	mode:	flags
7191  *
7192  * Returns:
7193  *
7194  * Context:
7195  *	Kernel context.
7196  */
7197 static void
7198 ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7199 {
7200 	MENLO_RESET	rst;
7201 	ql_mbx_data_t	mr;
7202 	int		rval;
7203 
7204 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7205 
7206 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7207 		EL(ha, "failed, invalid request for HBA\n");
7208 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7209 		cmd->ResponseLen = 0;
7210 		return;
7211 	}
7212 
7213 	/*
7214 	 * TODO: only vp_index 0 can do this (?)
7215 	 */
7216 
7217 
7218 	/*  Verify the size of request structure. */
7219 	if (cmd->RequestLen < sizeof (MENLO_RESET)) {
7220 		/* Return error */
7221 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7222 		    sizeof (MENLO_RESET));
7223 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7224 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7225 		cmd->ResponseLen = 0;
7226 		return;
7227 	}
7228 
7229 	/* Get reset request. */
7230 	if (ddi_copyin((const void *)(uintptr_t)cmd->RequestAdr, (void *)&rst,
7231 	    sizeof (MENLO_RESET), mode) != 0) {
7232 		EL(ha, "failed, ddi_copyin\n");
7233 		cmd->Status = EXT_STATUS_COPY_ERR;
7234 		cmd->ResponseLen = 0;
7235 		return;
7236 	}
7237 
7238 	/* Wait for I/O to stop and daemon to stall. */
7239 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
7240 		EL(ha, "ql_stall_driver failed\n");
7241 		ql_restart_hba(ha);
7242 		cmd->Status = EXT_STATUS_BUSY;
7243 		cmd->ResponseLen = 0;
7244 		return;
7245 	}
7246 
7247 	rval = ql_reset_menlo(ha, &mr, rst.Flags);
7248 	if (rval != QL_SUCCESS) {
7249 		EL(ha, "failed, status=%xh\n", rval);
7250 		cmd->Status = EXT_STATUS_MAILBOX;
7251 		cmd->DetailStatus = rval;
7252 		cmd->ResponseLen = 0;
7253 	} else if (mr.mb[1] != 0) {
7254 		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
7255 		cmd->Status = EXT_STATUS_ERR;
7256 		cmd->DetailStatus = mr.mb[1];
7257 		cmd->ResponseLen = 0;
7258 	}
7259 
7260 	ql_restart_hba(ha);
7261 
7262 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7263 }
7264 
7265 /*
7266  * ql_menlo_get_fw_version
7267  *	Get Menlo firmware version.
7268  *
7269  * Input:
7270  *	ha:	adapter state pointer.
7271  *	bp:	buffer address.
7272  *	mode:	flags
7273  *
7274  * Returns:
7275  *
7276  * Context:
7277  *	Kernel context.
7278  */
7279 static void
7280 ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7281 {
7282 	int			rval;
7283 	ql_mbx_iocb_t		*pkt;
7284 	MENLO_GET_FW_VERSION	ver = {0};
7285 
7286 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7287 
7288 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7289 		EL(ha, "failed, invalid request for HBA\n");
7290 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7291 		cmd->ResponseLen = 0;
7292 		return;
7293 	}
7294 
7295 	if (cmd->ResponseLen < sizeof (MENLO_GET_FW_VERSION)) {
7296 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7297 		cmd->DetailStatus = sizeof (MENLO_GET_FW_VERSION);
7298 		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
7299 		    sizeof (MENLO_GET_FW_VERSION));
7300 		cmd->ResponseLen = 0;
7301 		return;
7302 	}
7303 
7304 	/* Allocate packet. */
7305 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7306 	if (pkt == NULL) {
7307 		EL(ha, "failed, kmem_zalloc\n");
7308 		cmd->Status = EXT_STATUS_NO_MEMORY;
7309 		cmd->ResponseLen = 0;
7310 		return;
7311 	}
7312 
7313 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
7314 	pkt->mvfy.entry_count = 1;
7315 	pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);
7316 
7317 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7318 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
7319 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
7320 	ver.FwVersion = LE_32(pkt->mvfy.fw_version);
7321 
7322 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
7323 	    pkt->mvfy.options_status != CS_COMPLETE) {
7324 		/* Command error */
7325 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7326 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
7327 		    pkt->mvfy.failure_code);
7328 		cmd->Status = EXT_STATUS_ERR;
7329 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7330 		    QL_FUNCTION_FAILED;
7331 		cmd->ResponseLen = 0;
7332 	} else if (ddi_copyout((void *)&ver,
7333 	    (void *)(uintptr_t)cmd->ResponseAdr,
7334 	    sizeof (MENLO_GET_FW_VERSION), mode) != 0) {
7335 		EL(ha, "failed, ddi_copyout\n");
7336 		cmd->Status = EXT_STATUS_COPY_ERR;
7337 		cmd->ResponseLen = 0;
7338 	} else {
7339 		cmd->ResponseLen = sizeof (MENLO_GET_FW_VERSION);
7340 	}
7341 
7342 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7343 
7344 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7345 }
7346 
7347 /*
7348  * ql_menlo_update_fw
7349  *	Get Menlo update firmware.
7350  *
7351  * Input:
7352  *	ha:	adapter state pointer.
7353  *	bp:	buffer address.
7354  *	mode:	flags
7355  *
7356  * Returns:
7357  *
7358  * Context:
7359  *	Kernel context.
7360  */
7361 static void
7362 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7363 {
7364 	ql_mbx_iocb_t		*pkt;
7365 	dma_mem_t		*dma_mem;
7366 	MENLO_UPDATE_FW		fw;
7367 	uint32_t		*ptr32;
7368 	int			rval;
7369 
7370 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7371 
7372 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7373 		EL(ha, "failed, invalid request for HBA\n");
7374 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7375 		cmd->ResponseLen = 0;
7376 		return;
7377 	}
7378 
7379 	/*
7380 	 * TODO: only vp_index 0 can do this (?)
7381 	 */
7382 
7383 	/*  Verify the size of request structure. */
7384 	if (cmd->RequestLen < sizeof (MENLO_UPDATE_FW)) {
7385 		/* Return error */
7386 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7387 		    sizeof (MENLO_UPDATE_FW));
7388 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7389 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7390 		cmd->ResponseLen = 0;
7391 		return;
7392 	}
7393 
7394 	/* Get update fw request. */
7395 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
7396 	    sizeof (MENLO_UPDATE_FW), mode) != 0) {
7397 		EL(ha, "failed, ddi_copyin\n");
7398 		cmd->Status = EXT_STATUS_COPY_ERR;
7399 		cmd->ResponseLen = 0;
7400 		return;
7401 	}
7402 
7403 	/* Wait for I/O to stop and daemon to stall. */
7404 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
7405 		EL(ha, "ql_stall_driver failed\n");
7406 		ql_restart_hba(ha);
7407 		cmd->Status = EXT_STATUS_BUSY;
7408 		cmd->ResponseLen = 0;
7409 		return;
7410 	}
7411 
7412 	/* Allocate packet. */
7413 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
7414 	if (dma_mem == NULL) {
7415 		EL(ha, "failed, kmem_zalloc\n");
7416 		cmd->Status = EXT_STATUS_NO_MEMORY;
7417 		cmd->ResponseLen = 0;
7418 		return;
7419 	}
7420 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7421 	if (pkt == NULL) {
7422 		EL(ha, "failed, kmem_zalloc\n");
7423 		kmem_free(dma_mem, sizeof (dma_mem_t));
7424 		ql_restart_hba(ha);
7425 		cmd->Status = EXT_STATUS_NO_MEMORY;
7426 		cmd->ResponseLen = 0;
7427 		return;
7428 	}
7429 
7430 	/* Get DMA memory for the IOCB */
7431 	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
7432 	    MEM_DATA_ALIGN) != QL_SUCCESS) {
7433 		cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
7434 		    "alloc failed", QL_NAME, ha->instance);
7435 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7436 		kmem_free(dma_mem, sizeof (dma_mem_t));
7437 		ql_restart_hba(ha);
7438 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
7439 		cmd->ResponseLen = 0;
7440 		return;
7441 	}
7442 
7443 	/* Get firmware data. */
7444 	if (ql_get_buffer_data((caddr_t)fw.pFwDataBytes, dma_mem->bp,
7445 	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
7446 		EL(ha, "failed, get_buffer_data\n");
7447 		ql_free_dma_resource(ha, dma_mem);
7448 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7449 		kmem_free(dma_mem, sizeof (dma_mem_t));
7450 		ql_restart_hba(ha);
7451 		cmd->Status = EXT_STATUS_COPY_ERR;
7452 		cmd->ResponseLen = 0;
7453 		return;
7454 	}
7455 
7456 	/* Sync DMA buffer. */
7457 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
7458 	    DDI_DMA_SYNC_FORDEV);
7459 
7460 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
7461 	pkt->mvfy.entry_count = 1;
7462 	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
7463 	ptr32 = dma_mem->bp;
7464 	pkt->mvfy.fw_version = LE_32(ptr32[2]);
7465 	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
7466 	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
7467 	pkt->mvfy.dseg_count = LE_16(1);
7468 	pkt->mvfy.dseg_0_address[0] = (uint32_t)
7469 	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
7470 	pkt->mvfy.dseg_0_address[1] = (uint32_t)
7471 	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
7472 	pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);
7473 
7474 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7475 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
7476 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
7477 
7478 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
7479 	    pkt->mvfy.options_status != CS_COMPLETE) {
7480 		/* Command error */
7481 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7482 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
7483 		    pkt->mvfy.failure_code);
7484 		cmd->Status = EXT_STATUS_ERR;
7485 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7486 		    QL_FUNCTION_FAILED;
7487 		cmd->ResponseLen = 0;
7488 	}
7489 
7490 	ql_free_dma_resource(ha, dma_mem);
7491 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7492 	kmem_free(dma_mem, sizeof (dma_mem_t));
7493 	ql_restart_hba(ha);
7494 
7495 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7496 }
7497 
7498 /*
7499  * ql_menlo_manage_info
7500  *	Get Menlo manage info.
7501  *
7502  * Input:
7503  *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
7505  *	mode:	flags
7506  *
7507  * Returns:
7508  *
7509  * Context:
7510  *	Kernel context.
7511  */
7512 static void
7513 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7514 {
7515 	ql_mbx_iocb_t		*pkt;
7516 	dma_mem_t		*dma_mem = NULL;
7517 	MENLO_MANAGE_INFO	info;
7518 	int			rval;
7519 
7520 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7521 
7522 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7523 		EL(ha, "failed, invalid request for HBA\n");
7524 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7525 		cmd->ResponseLen = 0;
7526 		return;
7527 	}
7528 
7529 	/*  Verify the size of request structure. */
7530 	if (cmd->RequestLen < sizeof (MENLO_MANAGE_INFO)) {
7531 		/* Return error */
7532 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7533 		    sizeof (MENLO_MANAGE_INFO));
7534 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7535 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7536 		cmd->ResponseLen = 0;
7537 		return;
7538 	}
7539 
7540 	/* Get manage info request. */
7541 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&info,
7542 	    sizeof (MENLO_MANAGE_INFO), mode) != 0) {
7543 		EL(ha, "failed, ddi_copyin\n");
7544 		cmd->Status = EXT_STATUS_COPY_ERR;
7545 		cmd->ResponseLen = 0;
7546 		return;
7547 	}
7548 
7549 	/* Allocate packet. */
7550 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7551 	if (pkt == NULL) {
7552 		EL(ha, "failed, kmem_zalloc\n");
7553 		ql_restart_driver(ha);
7554 		cmd->Status = EXT_STATUS_NO_MEMORY;
7555 		cmd->ResponseLen = 0;
7556 		return;
7557 	}
7558 
7559 	pkt->mdata.entry_type = MENLO_DATA_TYPE;
7560 	pkt->mdata.entry_count = 1;
7561 	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);
7562 
7563 	/* Get DMA memory for the IOCB */
7564 	if (info.Operation == MENLO_OP_READ_MEM ||
7565 	    info.Operation == MENLO_OP_WRITE_MEM) {
7566 		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
7567 		pkt->mdata.parameter_1 =
7568 		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
7569 		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
7570 		    KM_SLEEP);
7571 		if (dma_mem == NULL) {
7572 			EL(ha, "failed, kmem_zalloc\n");
7573 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7574 			cmd->Status = EXT_STATUS_NO_MEMORY;
7575 			cmd->ResponseLen = 0;
7576 			return;
7577 		}
7578 		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
7579 		    LITTLE_ENDIAN_DMA, MEM_DATA_ALIGN) != QL_SUCCESS) {
7580 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
7581 			    "alloc failed", QL_NAME, ha->instance);
7582 			kmem_free(dma_mem, sizeof (dma_mem_t));
7583 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7584 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
7585 			cmd->ResponseLen = 0;
7586 			return;
7587 		}
7588 		if (info.Operation == MENLO_OP_WRITE_MEM) {
7589 			/* Get data. */
7590 			if (ql_get_buffer_data((caddr_t)info.pDataBytes,
7591 			    dma_mem->bp, info.TotalByteCount, mode) !=
7592 			    info.TotalByteCount) {
7593 				EL(ha, "failed, get_buffer_data\n");
7594 				ql_free_dma_resource(ha, dma_mem);
7595 				kmem_free(dma_mem, sizeof (dma_mem_t));
7596 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7597 				cmd->Status = EXT_STATUS_COPY_ERR;
7598 				cmd->ResponseLen = 0;
7599 				return;
7600 			}
7601 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
7602 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
7603 		}
7604 		pkt->mdata.dseg_count = LE_16(1);
7605 		pkt->mdata.dseg_0_address[0] = (uint32_t)
7606 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
7607 		pkt->mdata.dseg_0_address[1] = (uint32_t)
7608 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
7609 		pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount);
7610 	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
7611 		pkt->mdata.parameter_1 =
7612 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
7613 		pkt->mdata.parameter_2 =
7614 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
7615 		pkt->mdata.parameter_3 =
7616 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
7617 	} else if (info.Operation & MENLO_OP_GET_INFO) {
7618 		pkt->mdata.parameter_1 =
7619 		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
7620 		pkt->mdata.parameter_2 =
7621 		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
7622 	}
7623 
7624 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7625 	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
7626 	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);
7627 
7628 	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
7629 	    pkt->mdata.options_status != CS_COMPLETE) {
7630 		/* Command error */
7631 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7632 		    pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status,
7633 		    pkt->mdata.failure_code);
7634 		cmd->Status = EXT_STATUS_ERR;
7635 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7636 		    QL_FUNCTION_FAILED;
7637 		cmd->ResponseLen = 0;
7638 	} else if (info.Operation == MENLO_OP_READ_MEM) {
7639 		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
7640 		    DDI_DMA_SYNC_FORKERNEL);
7641 		if (ql_send_buffer_data((caddr_t)info.pDataBytes, dma_mem->bp,
7642 		    info.TotalByteCount, mode) != info.TotalByteCount) {
7643 			cmd->Status = EXT_STATUS_COPY_ERR;
7644 			cmd->ResponseLen = 0;
7645 		}
7646 	}
7647 
7648 	ql_free_dma_resource(ha, dma_mem);
7649 	kmem_free(dma_mem, sizeof (dma_mem_t));
7650 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7651 
7652 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7653 }
7654 
7655 /*
7656  * ql_suspend_hba
7657  *	Suspends all adapter ports.
7658  *
7659  * Input:
7660  *	ha:		adapter state pointer.
7661  *	options:	BIT_0 --> leave driver stalled on exit if
7662  *				  failed.
7663  *
7664  * Returns:
7665  *	qla local function return status code.
7666  *
7667  * Context:
7668  *	Kernel context.
7669  */
7670 static int
7671 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
7672 {
7673 	ql_adapter_state_t	*ha2;
7674 	ql_link_t		*link;
7675 	int			rval = QL_SUCCESS;
7676 
7677 	/* Quiesce I/O on all adapter ports */
7678 	for (link = ql_hba.first; link != NULL; link = link->next) {
7679 		ha2 = link->base_address;
7680 
7681 		if (ha2->fru_hba_index != ha->fru_hba_index) {
7682 			continue;
7683 		}
7684 
7685 		if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) {
7686 			EL(ha, "ql_stall_driver status=%xh\n", rval);
7687 			break;
7688 		}
7689 	}
7690 
7691 	return (rval);
7692 }
7693 
7694 /*
7695  * ql_restart_hba
7696  *	Restarts adapter.
7697  *
7698  * Input:
7699  *	ha:	adapter state pointer.
7700  *
7701  * Context:
7702  *	Kernel context.
7703  */
7704 static void
7705 ql_restart_hba(ql_adapter_state_t *ha)
7706 {
7707 	ql_adapter_state_t	*ha2;
7708 	ql_link_t		*link;
7709 
7710 	/* Resume I/O on all adapter ports */
7711 	for (link = ql_hba.first; link != NULL; link = link->next) {
7712 		ha2 = link->base_address;
7713 
7714 		if (ha2->fru_hba_index != ha->fru_hba_index) {
7715 			continue;
7716 		}
7717 
7718 		ql_restart_driver(ha2);
7719 	}
7720 }
7721 
7722 /*
7723  * ql_get_vp_cnt_id
 *	Retrieves the virtual port count and ID list for the adapter.
7725  *
7726  * Input:
7727  *	ha:	adapter state pointer.
7728  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7729  *	mode:	flags.
7730  *
7731  * Returns:
7732  *	None, request status indicated in cmd->Status.
7733  *
7734  * Context:
7735  *	Kernel context.
7736  *
7737  */
7738 static void
7739 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7740 {
7741 	ql_adapter_state_t	*vha;
7742 	EXT_VPORT_ID_CNT	tmp_vp = {0};
7743 	int			id = 0;
7744 	int			rval;
7745 
7746 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
7747 
7748 	if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT)) {
7749 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7750 		cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
7751 		EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
7752 		    cmd->ResponseLen);
7753 		cmd->ResponseLen = 0;
7754 		return;
7755 	}
7756 
7757 	vha = ha->vp_next;
7758 	while (vha != NULL) {
7759 		tmp_vp.VpCnt++;
7760 		tmp_vp.VpId[id] = vha->vp_index;
7761 		id++;
7762 		vha = vha->vp_next;
7763 	}
7764 	rval = ddi_copyout((void *)&tmp_vp,
7765 	    (void *)(uintptr_t)(cmd->ResponseAdr),
7766 	    sizeof (EXT_VPORT_ID_CNT), mode);
7767 	if (rval != 0) {
7768 		cmd->Status = EXT_STATUS_COPY_ERR;
7769 		cmd->ResponseLen = 0;
7770 		EL(ha, "failed, ddi_copyout\n");
7771 	} else {
7772 		cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
7773 		QL_PRINT_9(CE_CONT, "(%d): exiting, vport_cnt=%d\n",
7774 		    ha->instance, tmp_vp.VpCnt);
7775 	}
7776 
7777 }
7778 
7779 /*
7780  * ql_vp_ioctl
7781  *	Performs all EXT_CC_VPORT_CMD functions.
7782  *
7783  * Input:
7784  *	ha:	adapter state pointer.
7785  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7786  *	mode:	flags.
7787  *
7788  * Returns:
7789  *	None, request status indicated in cmd->Status.
7790  *
7791  * Context:
7792  *	Kernel context.
7793  */
7794 static void
7795 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7796 {
7797 	QL_PRINT_9(CE_CONT, "(%d): entered, cmd=%d\n", ha->instance,
7798 	    cmd->SubCode);
7799 
7800 	/* case off on command subcode */
7801 	switch (cmd->SubCode) {
7802 	case EXT_VF_SC_VPORT_GETINFO:
7803 		ql_qry_vport(ha, cmd, mode);
7804 		break;
7805 	default:
7806 		/* function not supported. */
7807 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
7808 		EL(ha, "failed, Unsupported Subcode=%xh\n",
7809 		    cmd->SubCode);
7810 		break;
7811 	}
7812 
7813 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
7814 }
7815 
7816 /*
7817  * ql_qry_vport
7818  *	Performs EXT_VF_SC_VPORT_GETINFO subfunction.
7819  *
7820  * Input:
7821  *	ha:	adapter state pointer.
7822  *	cmd:	EXT_IOCTL cmd struct pointer.
7823  *	mode:	flags.
7824  *
7825  * Returns:
7826  *	None, request status indicated in cmd->Status.
7827  *
7828  * Context:
7829  *	Kernel context.
7830  */
7831 static void
7832 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
7833 {
7834 	ql_adapter_state_t	*tmp_vha;
7835 	EXT_VPORT_INFO		tmp_vport = {0};
7836 	int			max_vport;
7837 
7838 	QL_PRINT_9(CE_CONT, "(%d): entered\n", vha->instance);
7839 
7840 	if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
7841 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7842 		cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
7843 		EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
7844 		    cmd->ResponseLen);
7845 		cmd->ResponseLen = 0;
7846 		return;
7847 	}
7848 
7849 	/* Fill in the vport information. */
7850 	bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
7851 	    EXT_DEF_WWN_NAME_SIZE);
7852 	bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
7853 	    EXT_DEF_WWN_NAME_SIZE);
7854 	tmp_vport.state = vha->state;
7855 
7856 	tmp_vha = vha->pha->vp_next;
7857 	while (tmp_vha != NULL) {
7858 		tmp_vport.used++;
7859 		tmp_vha = tmp_vha->vp_next;
7860 	}
7861 
7862 	max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS :
7863 	    MAX_25_VIRTUAL_PORTS);
7864 	if (max_vport > tmp_vport.used) {
7865 		tmp_vport.free = max_vport - tmp_vport.used;
7866 	}
7867 
7868 	if (ddi_copyout((void *)&tmp_vport,
7869 	    (void *)(uintptr_t)(cmd->ResponseAdr),
7870 	    sizeof (EXT_VPORT_INFO), mode) != 0) {
7871 		cmd->Status = EXT_STATUS_COPY_ERR;
7872 		cmd->ResponseLen = 0;
7873 		EL(vha, "failed, ddi_copyout\n");
7874 	} else {
7875 		cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
7876 		QL_PRINT_9(CE_CONT, "(%d): exiting\n", vha->instance);
7877 	}
7878 }
7879