xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_xioctl.c (revision 4c3888b8f38c903370e022661d08aba393db3911)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2015 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 /*
29  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
30  */
31 
32 /*
33  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
34  *
35  * ***********************************************************************
36  * *									**
37  * *				NOTICE					**
38  * *		COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION		**
39  * *			ALL RIGHTS RESERVED				**
40  * *									**
41  * ***********************************************************************
42  *
43  */
44 
45 #include <ql_apps.h>
46 #include <ql_api.h>
47 #include <ql_debug.h>
48 #include <ql_init.h>
49 #include <ql_iocb.h>
50 #include <ql_ioctl.h>
51 #include <ql_mbx.h>
52 #include <ql_nx.h>
53 #include <ql_xioctl.h>
54 
55 /*
56  * Local data
57  */
58 
59 /*
60  * Local prototypes
61  */
62 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
63 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
64     boolean_t (*)(EXT_IOCTL *));
65 static boolean_t ql_validate_signature(EXT_IOCTL *);
66 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
67 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
90 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
91 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
92 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
93 
94 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
95 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
96 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
97 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
98     uint32_t);
99 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
100 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
101 static int ql_24xx_flash_desc(ql_adapter_state_t *);
102 static int ql_setup_flash(ql_adapter_state_t *);
103 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
104 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
105 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
106     uint32_t, int);
107 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
108     uint8_t);
109 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
110 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
112 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
113 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
115 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
116 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
117 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
118 static int ql_setup_led(ql_adapter_state_t *);
119 static int ql_wrapup_led(ql_adapter_state_t *);
120 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
121 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
122 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
123 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
124 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
125 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
126 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
127 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
128 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
129 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
130 static void ql_process_flt(ql_adapter_state_t *, uint32_t);
131 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
132 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
138 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
139 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
140 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
142 static void ql_restart_hba(ql_adapter_state_t *);
143 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
144 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
145 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
146 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
147 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
148 static void ql_update_flash_caches(ql_adapter_state_t *);
149 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
150 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
151 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int);
152 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int);
153 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int);
154 static void ql_get_temperature(ql_adapter_state_t *, EXT_IOCTL *, int);
155 static void ql_dump_cmd(ql_adapter_state_t *, EXT_IOCTL *, int);
156 static void ql_serdes_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
157 static void ql_serdes_reg_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
158 static void ql_els_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
159 static void ql_flash_update_caps(ql_adapter_state_t *, EXT_IOCTL *, int);
160 static void ql_get_bbcr_data(ql_adapter_state_t *, EXT_IOCTL *, int);
161 static void ql_get_priv_stats(ql_adapter_state_t *, EXT_IOCTL *, int);
162 
163 /* ******************************************************************** */
164 /*			External IOCTL support.				*/
165 /* ******************************************************************** */
166 
167 /*
168  * ql_alloc_xioctl_resource
169  *	Allocates resources needed by module code.
170  *
171  * Input:
172  *	ha:		adapter state pointer.
173  *
174  * Returns:
175  *	SYS_ERRNO
176  *
177  * Context:
178  *	Kernel context.
179  */
180 int
181 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
182 {
183 	ql_xioctl_t	*xp;
184 
185 	QL_PRINT_9(ha, "started\n");
186 
187 	if (ha->xioctl != NULL) {
188 		QL_PRINT_9(ha, "already allocated, instance=%d, done\n",
189 		    ha->instance);
190 		return (0);
191 	}
192 
193 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
194 	if (xp == NULL) {
195 		EL(ha, "failed, kmem_zalloc\n");
196 		return (ENOMEM);
197 	}
198 	ha->xioctl = xp;
199 
200 	/* Allocate AEN tracking buffer */
201 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
202 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
203 	if (xp->aen_tracking_queue == NULL) {
204 		EL(ha, "failed, kmem_zalloc-2\n");
205 		ql_free_xioctl_resource(ha);
206 		return (ENOMEM);
207 	}
208 
209 	QL_PRINT_9(ha, "done\n");
210 
211 	return (0);
212 }
213 
214 /*
215  * ql_free_xioctl_resource
216  *	Frees resources used by module code.
217  *
218  * Input:
219  *	ha:		adapter state pointer.
220  *
221  * Context:
222  *	Kernel context.
223  */
224 void
225 ql_free_xioctl_resource(ql_adapter_state_t *ha)
226 {
227 	ql_xioctl_t	*xp = ha->xioctl;
228 
229 	QL_PRINT_9(ha, "started\n");
230 
231 	if (xp == NULL) {
232 		QL_PRINT_9(ha, "already freed\n");
233 		return;
234 	}
235 
236 	if (xp->aen_tracking_queue != NULL) {
237 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
238 		    sizeof (EXT_ASYNC_EVENT));
239 		xp->aen_tracking_queue = NULL;
240 	}
241 
242 	kmem_free(xp, sizeof (ql_xioctl_t));
243 	ha->xioctl = NULL;
244 
245 	QL_PRINT_9(ha, "done\n");
246 }
247 
248 /*
249  * ql_xioctl
250  *	External IOCTL processing.
251  *
252  * Input:
253  *	ha:	adapter state pointer.
254  *	cmd:	function to perform
255  *	arg:	data type varies with request
256  *	mode:	flags
257  *	cred_p:	credentials pointer
258  *	rval_p:	pointer to result value
259  *
260  * Returns:
261  *	0:		success
262  *	ENXIO:		No such device or address
263  *	ENOPROTOOPT:	Protocol not available
264  *
265  * Context:
266  *	Kernel context.
267  */
268 /* ARGSUSED */
269 int
270 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
271     cred_t *cred_p, int *rval_p)
272 {
273 	int	rval;
274 
275 	QL_PRINT_9(ha, "started, cmd=%d\n", cmd);
276 
277 	if (ha->xioctl == NULL) {
278 		QL_PRINT_9(ha, "no context\n");
279 		return (ENXIO);
280 	}
281 
282 	switch (cmd) {
283 	case EXT_CC_QUERY:
284 	case EXT_CC_SEND_FCCT_PASSTHRU:
285 	case EXT_CC_REG_AEN:
286 	case EXT_CC_GET_AEN:
287 	case EXT_CC_SEND_SCSI_PASSTHRU:
288 	case EXT_CC_WWPN_TO_SCSIADDR:
289 	case EXT_CC_SEND_ELS_RNID:
290 	case EXT_CC_SET_DATA:
291 	case EXT_CC_GET_DATA:
292 	case EXT_CC_HOST_IDX:
293 	case EXT_CC_READ_NVRAM:
294 	case EXT_CC_UPDATE_NVRAM:
295 	case EXT_CC_READ_OPTION_ROM:
296 	case EXT_CC_READ_OPTION_ROM_EX:
297 	case EXT_CC_UPDATE_OPTION_ROM:
298 	case EXT_CC_UPDATE_OPTION_ROM_EX:
299 	case EXT_CC_GET_VPD:
300 	case EXT_CC_SET_VPD:
301 	case EXT_CC_LOOPBACK:
302 	case EXT_CC_GET_FCACHE:
303 	case EXT_CC_GET_FCACHE_EX:
304 	case EXT_CC_HOST_DRVNAME:
305 	case EXT_CC_GET_SFP_DATA:
306 	case EXT_CC_PORT_PARAM:
307 	case EXT_CC_GET_PCI_DATA:
308 	case EXT_CC_GET_FWEXTTRACE:
309 	case EXT_CC_GET_FWFCETRACE:
310 	case EXT_CC_GET_VP_CNT_ID:
311 	case EXT_CC_VPORT_CMD:
312 	case EXT_CC_ACCESS_FLASH:
313 	case EXT_CC_RESET_FW:
314 	case EXT_CC_MENLO_MANAGE_INFO:
315 	case EXT_CC_I2C_DATA:
316 	case EXT_CC_DUMP:
317 	case EXT_CC_SERDES_REG_OP:
318 	case EXT_CC_VF_STATE:
319 	case EXT_CC_SERDES_REG_OP_EX:
320 	case EXT_CC_ELS_PASSTHRU_OS:
321 	case EXT_CC_FLASH_UPDATE_CAPS_OS:
322 	case EXT_CC_GET_BBCR_DATA_OS:
323 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
324 		break;
325 	default:
326 		/* function not supported. */
327 		EL(ha, "function=%d not supported\n", cmd);
328 		rval = ENOPROTOOPT;
329 	}
330 
331 	QL_PRINT_9(ha, "done\n");
332 
333 	return (rval);
334 }
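/*
 * Illustrative usage (a hedged sketch, not part of the driver): how a
 * userland SAN management application might reach ql_xioctl() through
 * the EXT_IOCTL interface.  The EXT_* names used below (EXT_CC_QUERY,
 * EXT_SC_QUERY_HBA_NODE, EXT_VERSION, EXT_STATUS_OK, EXT_HBA_NODE)
 * appear in this file; the header name, device path, and exact field
 * types are assumptions.
 *
 *	#include <sys/types.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <exioct.h>	(assumed QLogic SDM header)
 *
 *	static int
 *	query_hba_node(const char *devpath, EXT_HBA_NODE *node)
 *	{
 *		EXT_IOCTL	ext;
 *		int		fd, rc;
 *
 *		if ((fd = open(devpath, O_RDWR)) == -1)
 *			return (-1);
 *
 *		(void) memset(&ext, 0, sizeof (ext));
 *		(void) memcpy(&ext.Signature, "QLOGIC", 6);
 *		ext.Version = EXT_VERSION;
 *		ext.HbaSelect = 0;	(0 selects the physical port)
 *		ext.SubCode = EXT_SC_QUERY_HBA_NODE;
 *		ext.ResponseAdr = (uint64_t)(uintptr_t)node;
 *		ext.ResponseLen = sizeof (EXT_HBA_NODE);
 *
 *		rc = ioctl(fd, EXT_CC_QUERY, &ext);
 *		(void) close(fd);
 *		return (rc == 0 && ext.Status == EXT_STATUS_OK ? 0 : -1);
 *	}
 */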
335 
336 /*
337  * ql_sdm_ioctl
338  *	Provides ioctl functions for SAN/Device Management functions
339  *	AKA External Ioctl functions.
340  *
341  * Input:
342  *	ha:		adapter state pointer.
343  *	ioctl_code:	ioctl function to perform
344  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
345  *	mode:		flags
346  *
347  * Returns:
348  *	0:	success
349  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
350  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
351  *		copyout of EXT_IOCTL status info failed.
352  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
353  *	EBUSY:	Device busy
354  *
355  * Context:
356  *	Kernel context.
357  */
358 static int
359 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
360 {
361 	EXT_IOCTL		*cmd;
362 	int			rval;
363 	ql_adapter_state_t	*vha;
364 
365 	QL_PRINT_9(ha, "started\n");
366 
367 	/* Copy argument structure (EXT_IOCTL) from application land. */
368 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
369 	    ql_validate_signature)) != 0) {
370 		/*
371 		 * A non-zero value at this time means a problem getting
372 		 * the requested information from application land; just
373 		 * return the error code and hope for the best.
374 		 */
375 		EL(ha, "failed, sdm_setup\n");
376 		return (rval);
377 	}
378 
379 	/*
380 	 * Map the physical ha ptr (which the ioctl is called with)
381 	 * to the virtual ha that the caller is addressing.
382 	 */
383 	if (ha->flags & VP_ENABLED) {
384 		/* Check that it is within range. */
385 		if (cmd->HbaSelect > ha->max_vports) {
386 			EL(ha, "Invalid HbaSelect vp index: %xh\n",
387 			    cmd->HbaSelect);
388 			cmd->Status = EXT_STATUS_INVALID_VPINDEX;
389 			cmd->ResponseLen = 0;
390 			return (EFAULT);
391 		}
392 		/*
393 		 * Special case: HbaSelect == 0 is physical ha
394 		 */
395 		if (cmd->HbaSelect != 0) {
396 			vha = ha->vp_next;
397 			while (vha != NULL) {
398 				if (vha->vp_index == cmd->HbaSelect) {
399 					ha = vha;
400 					break;
401 				}
402 				vha = vha->vp_next;
403 			}
404 			/*
405 			 * The specified vp index may be valid (within range)
406 			 * but it's not in the list. Currently this is all
407 			 * we can say.
408 			 */
409 			if (vha == NULL || !(vha->flags & VP_ENABLED)) {
410 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
411 				cmd->ResponseLen = 0;
412 				return (EFAULT);
413 			}
414 		}
415 	}
416 
417 	/*
418 	 * If driver is suspended, stalled, or powered down rtn BUSY
419 	 */
420 	if (ha->flags & ADAPTER_SUSPENDED ||
421 	    (ha->task_daemon_flags & (DRIVER_STALL | ISP_ABORT_NEEDED |
422 	    ABORT_ISP_ACTIVE | LOOP_RESYNC_NEEDED | LOOP_RESYNC_ACTIVE)) ||
423 	    ha->power_level != PM_LEVEL_D0) {
424 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
425 		    "driver suspended" :
426 		    (ha->task_daemon_flags & (DRIVER_STALL | ISP_ABORT_NEEDED |
427 		    ABORT_ISP_ACTIVE | LOOP_RESYNC_NEEDED |
428 		    LOOP_RESYNC_ACTIVE) ? "driver stalled" :
429 		    "FCA powered down"));
430 		cmd->Status = EXT_STATUS_BUSY;
431 		cmd->ResponseLen = 0;
432 		rval = EBUSY;
433 
434 		/* Return results to caller */
435 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
436 			EL(ha, "failed, sdm_return\n");
437 			rval = EFAULT;
438 		}
439 		return (rval);
440 	}
441 
442 	switch (ioctl_code) {
443 	case EXT_CC_QUERY_OS:
444 		ql_query(ha, cmd, mode);
445 		break;
446 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
447 		ql_fcct(ha, cmd, mode);
448 		break;
449 	case EXT_CC_REG_AEN_OS:
450 		ql_aen_reg(ha, cmd, mode);
451 		break;
452 	case EXT_CC_GET_AEN_OS:
453 		ql_aen_get(ha, cmd, mode);
454 		break;
455 	case EXT_CC_GET_DATA_OS:
456 		ql_get_host_data(ha, cmd, mode);
457 		break;
458 	case EXT_CC_SET_DATA_OS:
459 		ql_set_host_data(ha, cmd, mode);
460 		break;
461 	case EXT_CC_SEND_ELS_RNID_OS:
462 		ql_send_els_rnid(ha, cmd, mode);
463 		break;
464 	case EXT_CC_SCSI_PASSTHRU_OS:
465 		ql_scsi_passthru(ha, cmd, mode);
466 		break;
467 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
468 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
469 		break;
470 	case EXT_CC_HOST_IDX_OS:
471 		ql_host_idx(ha, cmd, mode);
472 		break;
473 	case EXT_CC_HOST_DRVNAME_OS:
474 		ql_host_drvname(ha, cmd, mode);
475 		break;
476 	case EXT_CC_READ_NVRAM_OS:
477 		ql_read_nvram(ha, cmd, mode);
478 		break;
479 	case EXT_CC_UPDATE_NVRAM_OS:
480 		ql_write_nvram(ha, cmd, mode);
481 		break;
482 	case EXT_CC_READ_OPTION_ROM_OS:
483 	case EXT_CC_READ_OPTION_ROM_EX_OS:
484 		ql_read_flash(ha, cmd, mode);
485 		break;
486 	case EXT_CC_UPDATE_OPTION_ROM_OS:
487 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
488 		ql_write_flash(ha, cmd, mode);
489 		break;
490 	case EXT_CC_LOOPBACK_OS:
491 		ql_diagnostic_loopback(ha, cmd, mode);
492 		break;
493 	case EXT_CC_GET_VPD_OS:
494 		ql_read_vpd(ha, cmd, mode);
495 		break;
496 	case EXT_CC_SET_VPD_OS:
497 		ql_write_vpd(ha, cmd, mode);
498 		break;
499 	case EXT_CC_GET_FCACHE_OS:
500 		ql_get_fcache(ha, cmd, mode);
501 		break;
502 	case EXT_CC_GET_FCACHE_EX_OS:
503 		ql_get_fcache_ex(ha, cmd, mode);
504 		break;
505 	case EXT_CC_GET_SFP_DATA_OS:
506 		ql_get_sfp(ha, cmd, mode);
507 		break;
508 	case EXT_CC_PORT_PARAM_OS:
509 		ql_port_param(ha, cmd, mode);
510 		break;
511 	case EXT_CC_GET_PCI_DATA_OS:
512 		ql_get_pci_data(ha, cmd, mode);
513 		break;
514 	case EXT_CC_GET_FWEXTTRACE_OS:
515 		ql_get_fwexttrace(ha, cmd, mode);
516 		break;
517 	case EXT_CC_GET_FWFCETRACE_OS:
518 		ql_get_fwfcetrace(ha, cmd, mode);
519 		break;
520 	case EXT_CC_MENLO_RESET:
521 		ql_menlo_reset(ha, cmd, mode);
522 		break;
523 	case EXT_CC_MENLO_GET_FW_VERSION:
524 		ql_menlo_get_fw_version(ha, cmd, mode);
525 		break;
526 	case EXT_CC_MENLO_UPDATE_FW:
527 		ql_menlo_update_fw(ha, cmd, mode);
528 		break;
529 	case EXT_CC_MENLO_MANAGE_INFO:
530 		ql_menlo_manage_info(ha, cmd, mode);
531 		break;
532 	case EXT_CC_GET_VP_CNT_ID_OS:
533 		ql_get_vp_cnt_id(ha, cmd, mode);
534 		break;
535 	case EXT_CC_VPORT_CMD_OS:
536 		ql_vp_ioctl(ha, cmd, mode);
537 		break;
538 	case EXT_CC_ACCESS_FLASH_OS:
539 		ql_access_flash(ha, cmd, mode);
540 		break;
541 	case EXT_CC_RESET_FW_OS:
542 		ql_reset_cmd(ha, cmd);
543 		break;
544 	case EXT_CC_I2C_DATA:
545 		ql_get_temperature(ha, cmd, mode);
546 		break;
547 	case EXT_CC_DUMP_OS:
548 		ql_dump_cmd(ha, cmd, mode);
549 		break;
550 	case EXT_CC_SERDES_REG_OP:
551 		ql_serdes_reg(ha, cmd, mode);
552 		break;
553 	case EXT_CC_SERDES_REG_OP_EX:
554 		ql_serdes_reg_ex(ha, cmd, mode);
555 		break;
556 	case EXT_CC_ELS_PASSTHRU_OS:
557 		ql_els_passthru(ha, cmd, mode);
558 		break;
559 	case EXT_CC_FLASH_UPDATE_CAPS_OS:
560 		ql_flash_update_caps(ha, cmd, mode);
561 		break;
562 	case EXT_CC_GET_BBCR_DATA_OS:
563 		ql_get_bbcr_data(ha, cmd, mode);
564 		break;
565 	default:
566 		/* function not supported. */
567 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
568 
569 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
570 		cmd->ResponseLen = 0;
571 		break;
572 	}
573 
574 	/* Return results to caller */
575 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
576 		EL(ha, "failed, sdm_return\n");
577 		return (EFAULT);
578 	}
579 
580 	QL_PRINT_9(ha, "done\n");
581 
582 	return (0);
583 }
584 
585 /*
586  * ql_sdm_setup
587  *	Make a local copy of the EXT_IOCTL struct and validate it.
588  *
589  * Input:
590  *	ha:		adapter state pointer.
591  *	cmd_struct:	Pointer to location to store local address of EXT_IOCTL.
592  *	arg:		Address of application EXT_IOCTL cmd data
593  *	mode:		flags
594  *	val_sig:	Pointer to a function to validate the ioctl signature.
595  *
596  * Returns:
597  *	0:		success
598  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
599  *	EINVAL:		Invalid version, signature.
600  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
601  *
602  * Context:
603  *	Kernel context.
604  */
605 static int
606 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
607     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
608 {
609 	int		rval;
610 	EXT_IOCTL	*cmd;
611 
612 	QL_PRINT_9(ha, "started\n");
613 
614 	/* Allocate local memory for EXT_IOCTL. */
615 	*cmd_struct = NULL;
616 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
617 	if (cmd == NULL) {
618 		EL(ha, "failed, kmem_zalloc\n");
619 		return (ENOMEM);
620 	}
621 	/* Get argument structure. */
622 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
623 	if (rval != 0) {
624 		EL(ha, "failed, ddi_copyin\n");
625 		rval = EFAULT;
626 	} else {
627 		/*
628 		 * Check signature and the version.
629 		 * If either is not valid, then neither is the
630 		 * structure, so don't attempt to return any error status
631 		 * because we can't trust what the caller's arg points to.
632 		 * Just return the errno.
633 		 */
634 		if (val_sig(cmd) == 0) {
635 			EL(ha, "failed, signature\n");
636 			rval = EINVAL;
637 		} else if (cmd->Version > EXT_VERSION) {
638 			EL(ha, "failed, version\n");
639 			rval = EINVAL;
640 		}
641 	}
642 
643 	if (rval == 0) {
644 		QL_PRINT_9(ha, "done\n");
645 		*cmd_struct = cmd;
646 		cmd->Status = EXT_STATUS_OK;
647 		cmd->DetailStatus = 0;
648 	} else {
649 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
650 	}
651 
652 	return (rval);
653 }
654 
655 /*
656  * ql_validate_signature
657  *	Validate the signature string for an external ioctl call.
658  *
659  * Input:
660  *	cmd_struct:	Pointer to EXT_IOCTL whose signature is to be validated.
661  *
662  * Returns:
663  *	B_TRUE:		Signature is valid.
664  *	B_FALSE:	Signature is NOT valid.
665  *
666  * Context:
667  *	Kernel context.
668  */
669 static boolean_t
670 ql_validate_signature(EXT_IOCTL *cmd_struct)
671 {
672 	/*
673 	 * Check signature.
674 	 *
675 	 * If signature is not valid then neither is the rest of
676 	 * the structure (i.e., we can't trust it), so don't attempt
677 	 * to return any error status other than the errno.
678 	 */
679 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
680 		QL_PRINT_2(NULL, "failed, signature\n");
681 		return (B_FALSE);
682 	}
683 
684 	return (B_TRUE);
685 }
686 
687 /*
688  * ql_sdm_return
689  *	Copies return data/status to application land for
690  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
691  *
692  * Input:
693  *	ha:		adapter state pointer.
694  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
696  *	arg:		EXT_IOCTL cmd data in application land.
697  *	mode:		flags
698  *
699  * Returns:
700  *	0:	success
701  *	EFAULT:	Copy out error.
702  *
703  * Context:
704  *	Kernel context.
705  */
706 /* ARGSUSED */
707 static int
708 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
709 {
710 	int	rval = 0;
711 
712 	QL_PRINT_9(ha, "started\n");
713 
714 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
715 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
716 	    mode);
717 
718 	rval |= ddi_copyout((void *)&cmd->Status,
719 	    (void *)&(((EXT_IOCTL*)arg)->Status),
720 	    sizeof (cmd->Status), mode);
721 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
722 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
723 	    sizeof (cmd->DetailStatus), mode);
724 
725 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
726 
727 	if (rval != 0) {
728 		/* Some copyout operation failed */
729 		EL(ha, "failed, ddi_copyout\n");
730 		return (EFAULT);
731 	}
732 
733 	QL_PRINT_9(ha, "done\n");
734 
735 	return (0);
736 }
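/*
 * Note that ql_sdm_return() copies back only the Status, DetailStatus and
 * ResponseLen fields of the caller's EXT_IOCTL.  The bulk response payload
 * itself is written to cmd->ResponseAdr earlier, by the individual handlers
 * (ql_qry_hba_node(), ql_qry_hba_port(), ql_fcct(), etc.).
 */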
737 
738 /*
739  * ql_query
740  *	Performs all EXT_CC_QUERY functions.
741  *
742  * Input:
743  *	ha:	adapter state pointer.
744  *	cmd:	Local EXT_IOCTL cmd struct pointer.
745  *	mode:	flags.
746  *
747  * Returns:
748  *	None, request status indicated in cmd->Status.
749  *
750  * Context:
751  *	Kernel context.
752  */
753 static void
754 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
755 {
756 	QL_PRINT_9(ha, "started, SubCode=%d\n",
757 	    cmd->SubCode);
758 
759 	/* case off on command subcode */
760 	switch (cmd->SubCode) {
761 	case EXT_SC_QUERY_HBA_NODE:
762 		ql_qry_hba_node(ha, cmd, mode);
763 		break;
764 	case EXT_SC_QUERY_HBA_PORT:
765 		ql_qry_hba_port(ha, cmd, mode);
766 		break;
767 	case EXT_SC_QUERY_DISC_PORT:
768 		ql_qry_disc_port(ha, cmd, mode);
769 		break;
770 	case EXT_SC_QUERY_DISC_TGT:
771 		ql_qry_disc_tgt(ha, cmd, mode);
772 		break;
773 	case EXT_SC_QUERY_DRIVER:
774 		ql_qry_driver(ha, cmd, mode);
775 		break;
776 	case EXT_SC_QUERY_FW:
777 		ql_qry_fw(ha, cmd, mode);
778 		break;
779 	case EXT_SC_QUERY_CHIP:
780 		ql_qry_chip(ha, cmd, mode);
781 		break;
782 	case EXT_SC_QUERY_CNA_PORT:
783 		ql_qry_cna_port(ha, cmd, mode);
784 		break;
785 	case EXT_SC_QUERY_ADAPTER_VERSIONS:
786 		ql_qry_adapter_versions(ha, cmd, mode);
787 		break;
788 	case EXT_SC_QUERY_DISC_LUN:
789 	default:
790 		/* function not supported. */
791 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
792 		EL(ha, "failed, Unsupported Subcode=%xh\n",
793 		    cmd->SubCode);
794 		break;
795 	}
796 
797 	QL_PRINT_9(ha, "done\n");
798 }
799 
800 /*
801  * ql_qry_hba_node
802  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
803  *
804  * Input:
805  *	ha:	adapter state pointer.
806  *	cmd:	EXT_IOCTL cmd struct pointer.
807  *	mode:	flags.
808  *
809  * Returns:
810  *	None, request status indicated in cmd->Status.
811  *
812  * Context:
813  *	Kernel context.
814  */
815 static void
816 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
817 {
818 	EXT_HBA_NODE	tmp_node = {0};
819 	uint_t		len;
820 	caddr_t		bufp;
821 
822 	QL_PRINT_9(ha, "started\n");
823 
824 	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
825 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
826 		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
827 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
828 		    "Len=%xh\n", cmd->ResponseLen);
829 		cmd->ResponseLen = 0;
830 		return;
831 	}
832 
833 	/* fill in the values */
834 
835 	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
836 	    EXT_DEF_WWN_NAME_SIZE);
837 
838 	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");
839 
840 	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);
841 
842 	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);
843 
844 	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);
845 
846 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
847 		size_t		verlen;
848 		uint16_t	w;
849 		char		*tmpptr;
850 
851 		verlen = strlen((char *)(tmp_node.DriverVersion));
852 		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
853 			EL(ha, "failed, No room for fpga version string\n");
854 		} else {
855 			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
856 			    (uint16_t *)
857 			    (ha->sbus_fpga_iobase + FPGA_REVISION));
858 
859 			tmpptr = (char *)&(tmp_node.DriverVersion[verlen + 1]);
860 			if (tmpptr == NULL) {
861 				EL(ha, "Unable to insert fpga version str\n");
862 			} else {
863 				(void) sprintf(tmpptr, "%d.%d",
864 				    ((w & 0xf0) >> 4), (w & 0x0f));
865 				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
866 			}
867 		}
868 	}
869 
870 	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
871 	    ha->fw_major_version, ha->fw_minor_version,
872 	    ha->fw_subminor_version);
873 
874 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
875 		switch (ha->fw_attributes) {
876 		case FWATTRIB_EF:
877 			(void) strcat((char *)(tmp_node.FWVersion), " EF");
878 			break;
879 		case FWATTRIB_TP:
880 			(void) strcat((char *)(tmp_node.FWVersion), " TP");
881 			break;
882 		case FWATTRIB_IP:
883 			(void) strcat((char *)(tmp_node.FWVersion), " IP");
884 			break;
885 		case FWATTRIB_IPX:
886 			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
887 			break;
888 		case FWATTRIB_FL:
889 			(void) strcat((char *)(tmp_node.FWVersion), " FL");
890 			break;
891 		case FWATTRIB_FPX:
892 			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
893 			break;
894 		default:
895 			break;
896 		}
897 	}
898 
899 	/* FCode version. */
900 	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
901 	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
902 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
903 	    (int *)&len) == DDI_PROP_SUCCESS) {
904 		if (len < EXT_DEF_MAX_STR_SIZE) {
905 			bcopy(bufp, tmp_node.OptRomVersion, len);
906 		} else {
907 			bcopy(bufp, tmp_node.OptRomVersion,
908 			    EXT_DEF_MAX_STR_SIZE - 1);
909 			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
910 			    '\0';
911 		}
912 		kmem_free(bufp, len);
913 	} else {
914 		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
915 	}
916 	tmp_node.PortCount = 1;
917 	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;
918 
919 	tmp_node.MpiVersion[0] = ha->mpi_fw_major_version;
920 	tmp_node.MpiVersion[1] = ha->mpi_fw_minor_version;
921 	tmp_node.MpiVersion[2] = ha->mpi_fw_subminor_version;
922 	tmp_node.PepFwVersion[0] = ha->phy_fw_major_version;
923 	tmp_node.PepFwVersion[1] = ha->phy_fw_minor_version;
924 	tmp_node.PepFwVersion[2] = ha->phy_fw_subminor_version;
925 	if (ddi_copyout((void *)&tmp_node,
926 	    (void *)(uintptr_t)(cmd->ResponseAdr),
927 	    sizeof (EXT_HBA_NODE), mode) != 0) {
928 		cmd->Status = EXT_STATUS_COPY_ERR;
929 		cmd->ResponseLen = 0;
930 		EL(ha, "failed, ddi_copyout\n");
931 	} else {
932 		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
933 		QL_PRINT_9(ha, "done\n");
934 	}
935 }
936 
937 /*
938  * ql_qry_hba_port
939  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
940  *
941  * Input:
942  *	ha:	adapter state pointer.
943  *	cmd:	EXT_IOCTL cmd struct pointer.
944  *	mode:	flags.
945  *
946  * Returns:
947  *	None, request status indicated in cmd->Status.
948  *
949  * Context:
950  *	Kernel context.
951  */
952 static void
953 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
954 {
955 	ql_link_t	*link;
956 	ql_tgt_t	*tq;
957 	ql_mbx_data_t	mr = {0};
958 	EXT_HBA_PORT	tmp_port = {0};
959 	int		rval;
960 	uint16_t	port_cnt, tgt_cnt, index;
961 
962 	QL_PRINT_9(ha, "started\n");
963 
964 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
965 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
966 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
967 		EL(ha, "failed, ResponseLen < EXT_HBA_PORT, Len=%xh\n",
968 		    cmd->ResponseLen);
969 		cmd->ResponseLen = 0;
970 		return;
971 	}
972 
973 	/* fill in the values */
974 
975 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
976 	    EXT_DEF_WWN_NAME_SIZE);
977 	tmp_port.Id[0] = 0;
978 	tmp_port.Id[1] = ha->d_id.b.domain;
979 	tmp_port.Id[2] = ha->d_id.b.area;
980 	tmp_port.Id[3] = ha->d_id.b.al_pa;
981 
982 	/* For now we are an initiator-only driver. */
983 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
984 
985 	if (ha->task_daemon_flags & LOOP_DOWN) {
986 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
987 	} else if (DRIVER_SUSPENDED(ha)) {
988 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
989 	} else {
990 		tmp_port.State = EXT_DEF_HBA_OK;
991 	}
992 
993 	if (ha->flags & POINT_TO_POINT) {
994 		tmp_port.Mode = EXT_DEF_P2P_MODE;
995 	} else {
996 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
997 	}
998 	/*
999 	 * fill in the portspeed values.
1000 	 *
1001 	 * default to not yet negotiated state
1002 	 */
1003 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
1004 
1005 	if (tmp_port.State == EXT_DEF_HBA_OK) {
1006 		switch (ha->iidma_rate) {
1007 		case IIDMA_RATE_1GB:
1008 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
1009 			break;
1010 		case IIDMA_RATE_2GB:
1011 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_2GBIT;
1012 			break;
1013 		case IIDMA_RATE_4GB:
1014 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_4GBIT;
1015 			break;
1016 		case IIDMA_RATE_8GB:
1017 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_8GBIT;
1018 			break;
1019 		case IIDMA_RATE_10GB:
1020 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_10GBIT;
1021 			break;
1022 		case IIDMA_RATE_16GB:
1023 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_16GBIT;
1024 			break;
1025 		case IIDMA_RATE_32GB:
1026 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_32GBIT;
1027 			break;
1028 		default:
1029 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1030 			EL(ha, "failed, data rate=%xh\n", ha->iidma_rate);
1031 			break;
1032 		}
1033 	}
1034 
1035 	/* Report all supported port speeds */
1036 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
1037 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
1038 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
1039 		    EXT_DEF_PORTSPEED_1GBIT);
1040 		/*
1041 		 * Correct supported speeds based on type of
1042 		 * sfp that is present
1043 		 */
1044 		switch (ha->sfp_stat) {
1045 		case 1:
1046 			/* no sfp detected */
1047 			break;
1048 		case 2:
1049 		case 4:
1050 			/* 4GB sfp */
1051 			tmp_port.PortSupportedSpeed &=
1052 			    ~EXT_DEF_PORTSPEED_8GBIT;
1053 			break;
1054 		case 3:
1055 		case 5:
1056 			/* 8GB sfp */
1057 			tmp_port.PortSupportedSpeed &=
1058 			    ~EXT_DEF_PORTSPEED_1GBIT;
1059 			break;
1060 		default:
1061 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1062 			break;
1063 
1064 		}
1065 	} else if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1066 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1067 	} else if (CFG_IST(ha, CFG_CTRL_24XX)) {
1068 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1069 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1070 	} else if (CFG_IST(ha, CFG_CTRL_23XX)) {
1071 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1072 		    EXT_DEF_PORTSPEED_1GBIT);
1073 	} else if (CFG_IST(ha, CFG_CTRL_63XX)) {
1074 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1075 	} else if (CFG_IST(ha, CFG_CTRL_22XX)) {
1076 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1077 	} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
1078 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_4GBIT |
1079 		    EXT_DEF_PORTSPEED_8GBIT | EXT_DEF_PORTSPEED_16GBIT;
1080 	} else if (CFG_IST(ha, CFG_CTRL_27XX)) {
1081 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_4GBIT |
1082 		    EXT_DEF_PORTSPEED_8GBIT | EXT_DEF_PORTSPEED_16GBIT |
1083 		    EXT_DEF_PORTSPEED_32GBIT;
1084 	} else {
1085 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1086 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1087 	}
1088 
1089 	if (ha->task_daemon_flags & LOOP_DOWN) {
1090 		(void) ql_get_firmware_state(ha, NULL);
1091 	}
1092 
1093 	tmp_port.LinkState1 = ha->fw_state[1];
1094 	tmp_port.LinkState2 = LSB(ha->sfp_stat);
1095 	tmp_port.LinkState3 = ha->fw_state[3];
1096 	tmp_port.LinkState6 = ha->fw_state[6];
1097 
1098 	port_cnt = 0;
1099 	tgt_cnt = 0;
1100 
1101 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1102 		for (link = ha->dev[index].first; link != NULL;
1103 		    link = link->next) {
1104 			tq = link->base_address;
1105 
1106 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1107 			    tq->d_id.b24 == FS_MANAGEMENT_SERVER) {
1108 				continue;
1109 			}
1110 
1111 			if (tq->flags & (TQF_RSCN_RCVD | TQF_IIDMA_NEEDED |
1112 			    TQF_NEED_AUTHENTICATION | TQF_PLOGI_PROGRS)) {
1113 				continue;
1114 			}
1115 
1116 			port_cnt++;
1117 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1118 				tgt_cnt++;
1119 			}
1120 		}
1121 	}
1122 
1123 	tmp_port.DiscPortCount = port_cnt;
1124 	tmp_port.DiscTargetCount = tgt_cnt;
1125 
1126 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1127 
1128 	rval = ddi_copyout((void *)&tmp_port,
1129 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1130 	    sizeof (EXT_HBA_PORT), mode);
1131 	if (rval != 0) {
1132 		cmd->Status = EXT_STATUS_COPY_ERR;
1133 		cmd->ResponseLen = 0;
1134 		EL(ha, "failed, ddi_copyout\n");
1135 	} else {
1136 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1137 		QL_PRINT_9(ha, "done, instance=%d, ports=%d, targets=%d\n",
1138 		    ha->instance, port_cnt, tgt_cnt);
1139 	}
1140 }
1141 
1142 /*
1143  * ql_qry_disc_port
1144  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1145  *
1146  * Input:
1147  *	ha:	adapter state pointer.
1148  *	cmd:	EXT_IOCTL cmd struct pointer.
1149  *	mode:	flags.
1150  *
1151  *	cmd->Instance = Port instance in fcport chain.
1152  *
1153  * Returns:
1154  *	None, request status indicated in cmd->Status.
1155  *
1156  * Context:
1157  *	Kernel context.
1158  */
1159 static void
1160 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1161 {
1162 	EXT_DISC_PORT	tmp_port = {0};
1163 	ql_link_t	*link;
1164 	ql_tgt_t	*tq;
1165 	uint16_t	index;
1166 	uint16_t	inst = 0;
1167 
1168 	QL_PRINT_9(ha, "started\n");
1169 
1170 	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1171 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1172 		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1173 		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1174 		    cmd->ResponseLen);
1175 		cmd->ResponseLen = 0;
1176 		return;
1177 	}
1178 
1179 	for (link = NULL, index = 0;
1180 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1181 		for (link = ha->dev[index].first; link != NULL;
1182 		    link = link->next) {
1183 			tq = link->base_address;
1184 
1185 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1186 			    tq->d_id.b24 == FS_MANAGEMENT_SERVER) {
1187 				continue;
1188 			}
1189 
1190 			if (tq->flags & (TQF_RSCN_RCVD | TQF_IIDMA_NEEDED |
1191 			    TQF_NEED_AUTHENTICATION | TQF_PLOGI_PROGRS)) {
1192 				continue;
1193 			}
1194 
1195 			if (inst != cmd->Instance) {
1196 				inst++;
1197 				continue;
1198 			}
1199 
1200 			/* fill in the values */
1201 			bcopy(tq->node_name, tmp_port.WWNN,
1202 			    EXT_DEF_WWN_NAME_SIZE);
1203 			bcopy(tq->port_name, tmp_port.WWPN,
1204 			    EXT_DEF_WWN_NAME_SIZE);
1205 
1206 			break;
1207 		}
1208 	}
1209 
1210 	if (link == NULL) {
1211 		/* no matching device */
1212 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1213 		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1214 		cmd->ResponseLen = 0;
1215 		return;
1216 	}
1217 
1218 	tmp_port.Id[0] = 0;
1219 	tmp_port.Id[1] = tq->d_id.b.domain;
1220 	tmp_port.Id[2] = tq->d_id.b.area;
1221 	tmp_port.Id[3] = tq->d_id.b.al_pa;
1222 
1223 	tmp_port.Type = 0;
1224 	if (tq->flags & TQF_INITIATOR_DEVICE) {
1225 		tmp_port.Type = (uint16_t)(tmp_port.Type |
1226 		    EXT_DEF_INITIATOR_DEV);
1227 	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1228 		(void) ql_inq_scan(ha, tq, 1);
1229 	} else if (tq->flags & TQF_TAPE_DEVICE) {
1230 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1231 	}
1232 
1233 	if (tq->flags & TQF_FABRIC_DEVICE) {
1234 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1235 	} else {
1236 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1237 	}
1238 
1239 	tmp_port.Status = 0;
1240 	tmp_port.Bus = 0;  /* Hard-coded for Solaris */
1241 
1242 	bcopy(tq->port_name, &tmp_port.TargetId, 8);
1243 
1244 	if (ddi_copyout((void *)&tmp_port,
1245 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1246 	    sizeof (EXT_DISC_PORT), mode) != 0) {
1247 		cmd->Status = EXT_STATUS_COPY_ERR;
1248 		cmd->ResponseLen = 0;
1249 		EL(ha, "failed, ddi_copyout\n");
1250 	} else {
1251 		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1252 		QL_PRINT_9(ha, "done\n");
1253 	}
1254 }
1255 
1256 /*
1257  * ql_qry_disc_tgt
1258  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1259  *
1260  * Input:
1261  *	ha:		adapter state pointer.
1262  *	cmd:		EXT_IOCTL cmd struct pointer.
1263  *	mode:		flags.
1264  *
1265  *	cmd->Instance = Port instance in fcport chain.
1266  *
1267  * Returns:
1268  *	None, request status indicated in cmd->Status.
1269  *
1270  * Context:
1271  *	Kernel context.
1272  */
1273 static void
1274 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1275 {
1276 	EXT_DISC_TARGET	tmp_tgt = {0};
1277 	ql_link_t	*link;
1278 	ql_tgt_t	*tq;
1279 	uint16_t	index;
1280 	uint16_t	inst = 0;
1281 
1282 	QL_PRINT_9(ha, "started, target=%d\n",
1283 	    cmd->Instance);
1284 
1285 	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1286 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1287 		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1288 		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1289 		    cmd->ResponseLen);
1290 		cmd->ResponseLen = 0;
1291 		return;
1292 	}
1293 
1294 	/* Scan port list for requested target and fill in the values */
1295 	for (link = NULL, index = 0;
1296 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1297 		for (link = ha->dev[index].first; link != NULL;
1298 		    link = link->next) {
1299 			tq = link->base_address;
1300 
1301 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1302 			    tq->flags & TQF_INITIATOR_DEVICE ||
1303 			    tq->d_id.b24 == FS_MANAGEMENT_SERVER) {
1304 				continue;
1305 			}
1306 			if (inst != cmd->Instance) {
1307 				inst++;
1308 				continue;
1309 			}
1310 
1311 			/* fill in the values */
1312 			bcopy(tq->node_name, tmp_tgt.WWNN,
1313 			    EXT_DEF_WWN_NAME_SIZE);
1314 			bcopy(tq->port_name, tmp_tgt.WWPN,
1315 			    EXT_DEF_WWN_NAME_SIZE);
1316 
1317 			break;
1318 		}
1319 	}
1320 
1321 	if (link == NULL) {
1322 		/* no matching device */
1323 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1324 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
1325 		EL(ha, "failed, not found target=%d\n", cmd->Instance);
1326 		cmd->ResponseLen = 0;
1327 		return;
1328 	}
1329 	tmp_tgt.Id[0] = 0;
1330 	tmp_tgt.Id[1] = tq->d_id.b.domain;
1331 	tmp_tgt.Id[2] = tq->d_id.b.area;
1332 	tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1333 
1334 	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1335 
1336 	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1337 		(void) ql_inq_scan(ha, tq, 1);
1338 	}
1339 
1340 	tmp_tgt.Type = 0;
1341 	if (tq->flags & TQF_TAPE_DEVICE) {
1342 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1343 	}
1344 
1345 	if (tq->flags & TQF_FABRIC_DEVICE) {
1346 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1347 	} else {
1348 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1349 	}
1350 
1351 	tmp_tgt.Status = 0;
1352 
1353 	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */
1354 
1355 	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1356 
1357 	if (ddi_copyout((void *)&tmp_tgt,
1358 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1359 	    sizeof (EXT_DISC_TARGET), mode) != 0) {
1360 		cmd->Status = EXT_STATUS_COPY_ERR;
1361 		cmd->ResponseLen = 0;
1362 		EL(ha, "failed, ddi_copyout\n");
1363 	} else {
1364 		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1365 		QL_PRINT_9(ha, "done\n");
1366 	}
1367 }
1368 
1369 /*
1370  * ql_qry_fw
1371  *	Performs EXT_SC_QUERY_FW subfunction.
1372  *
1373  * Input:
1374  *	ha:	adapter state pointer.
1375  *	cmd:	EXT_IOCTL cmd struct pointer.
1376  *	mode:	flags.
1377  *
1378  * Returns:
1379  *	None, request status indicated in cmd->Status.
1380  *
1381  * Context:
1382  *	Kernel context.
1383  */
1384 static void
1385 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1386 {
1387 	EXT_FW		fw_info = {0};
1388 
1389 	QL_PRINT_9(ha, "started\n");
1390 
1391 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1392 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1393 		cmd->DetailStatus = sizeof (EXT_FW);
1394 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1395 		    cmd->ResponseLen);
1396 		cmd->ResponseLen = 0;
1397 		return;
1398 	}
1399 
1400 	(void) sprintf((char *)(fw_info.Version), "%d.%02d.%02d",
1401 	    ha->fw_major_version, ha->fw_minor_version,
1402 	    ha->fw_subminor_version);
1403 
1404 	fw_info.Attrib = ha->fw_attributes;
1405 
1406 	if (ddi_copyout((void *)&fw_info,
1407 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1408 	    sizeof (EXT_FW), mode) != 0) {
1409 		cmd->Status = EXT_STATUS_COPY_ERR;
1410 		cmd->ResponseLen = 0;
1411 		EL(ha, "failed, ddi_copyout\n");
1412 		return;
1413 	} else {
1414 		cmd->ResponseLen = sizeof (EXT_FW);
1415 		QL_PRINT_9(ha, "done\n");
1416 	}
1417 }
1418 
1419 /*
1420  * ql_qry_chip
1421  *	Performs EXT_SC_QUERY_CHIP subfunction.
1422  *
1423  * Input:
1424  *	ha:	adapter state pointer.
1425  *	cmd:	EXT_IOCTL cmd struct pointer.
1426  *	mode:	flags.
1427  *
1428  * Returns:
1429  *	None, request status indicated in cmd->Status.
1430  *
1431  * Context:
1432  *	Kernel context.
1433  */
1434 static void
1435 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1436 {
1437 	EXT_CHIP	chip = {0};
1438 	uint16_t	PciDevNumber;
1439 
1440 	QL_PRINT_9(ha, "started\n");
1441 
1442 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1443 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1444 		cmd->DetailStatus = sizeof (EXT_CHIP);
1445 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1446 		    cmd->ResponseLen);
1447 		cmd->ResponseLen = 0;
1448 		return;
1449 	}
1450 
1451 	chip.VendorId = ha->ven_id;
1452 	chip.DeviceId = ha->device_id;
1453 	chip.SubVendorId = ha->subven_id;
1454 	chip.SubSystemId = ha->subsys_id;
1455 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1456 	chip.IoAddrLen = 0x100;
1457 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1458 	chip.MemAddrLen = 0x100;
1459 	chip.ChipRevID = ha->rev_id;
1460 	chip.FuncNo = ha->pci_function_number;
1461 	chip.PciBusNumber = (uint16_t)
1462 	    ((ha->pci_bus_addr & PCI_REG_BUS_M) >> PCI_REG_BUS_SHIFT);
1463 
1464 	PciDevNumber = (uint16_t)
1465 	    ((ha->pci_bus_addr & PCI_REG_DEV_M) >> PCI_REG_DEV_SHIFT);
1466 	chip.PciSlotNumber = (uint16_t)(((PciDevNumber << 3) & 0xF8) |
1467 	    (chip.FuncNo & 0x7));
1468 
1469 	if (ddi_copyout((void *)&chip,
1470 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1471 	    sizeof (EXT_CHIP), mode) != 0) {
1472 		cmd->Status = EXT_STATUS_COPY_ERR;
1473 		cmd->ResponseLen = 0;
1474 		EL(ha, "failed, ddi_copyout\n");
1475 	} else {
1476 		cmd->ResponseLen = sizeof (EXT_CHIP);
1477 		QL_PRINT_9(ha, "done\n");
1478 	}
1479 }
1480 
1481 /*
1482  * ql_qry_driver
1483  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1484  *
1485  * Input:
1486  *	ha:	adapter state pointer.
1487  *	cmd:	EXT_IOCTL cmd struct pointer.
1488  *	mode:	flags.
1489  *
1490  * Returns:
1491  *	None, request status indicated in cmd->Status.
1492  *
1493  * Context:
1494  *	Kernel context.
1495  */
1496 static void
1497 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1498 {
1499 	EXT_DRIVER	qd = {0};
1500 
1501 	QL_PRINT_9(ha, "started\n");
1502 
1503 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1504 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1505 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1506 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1507 		    cmd->ResponseLen);
1508 		cmd->ResponseLen = 0;
1509 		return;
1510 	}
1511 
1512 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1513 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1514 	qd.TargetsPerBus = (uint16_t)
1515 	    (CFG_IST(ha, (CFG_ISP_FW_TYPE_2 | CFG_EXT_FW_INTERFACE)) ?
1516 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1517 	qd.LunsPerTarget = 2030;
1518 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1519 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1520 
1521 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1522 	    sizeof (EXT_DRIVER), mode) != 0) {
1523 		cmd->Status = EXT_STATUS_COPY_ERR;
1524 		cmd->ResponseLen = 0;
1525 		EL(ha, "failed, ddi_copyout\n");
1526 	} else {
1527 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1528 		QL_PRINT_9(ha, "done\n");
1529 	}
1530 }
1531 
1532 /*
1533  * ql_fcct
1534  *	IOCTL management server FC-CT passthrough.
1535  *
1536  * Input:
1537  *	ha:	adapter state pointer.
1538  *	cmd:	User space CT arguments pointer.
1539  *	mode:	flags.
1540  *
1541  * Returns:
1542  *	None, request status indicated in cmd->Status.
1543  *
1544  * Context:
1545  *	Kernel context.
1546  */
1547 static void
1548 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1549 {
1550 	ql_mbx_iocb_t		*pkt;
1551 	ql_mbx_data_t		mr;
1552 	dma_mem_t		*dma_mem;
1553 	caddr_t			pld;
1554 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1555 	int			rval;
1556 	ql_ct_iu_preamble_t	*ct;
1557 	ql_xioctl_t		*xp = ha->xioctl;
1558 	ql_tgt_t		tq;
1559 	uint16_t		comp_status, loop_id;
1560 
1561 	QL_PRINT_9(ha, "started\n");
1562 
1563 	/* Get CT argument structure. */
1564 	if ((ha->topology & QL_FABRIC_CONNECTION) == 0) {
1565 		EL(ha, "failed, No switch\n");
1566 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1567 		cmd->ResponseLen = 0;
1568 		return;
1569 	}
1570 
1571 	if (DRIVER_SUSPENDED(ha)) {
1572 		EL(ha, "failed, LOOP_NOT_READY\n");
1573 		cmd->Status = EXT_STATUS_BUSY;
1574 		cmd->ResponseLen = 0;
1575 		return;
1576 	}
1577 
1578 	/* Login management server device. */
1579 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1580 		tq.d_id.b.al_pa = 0xfa;
1581 		tq.d_id.b.area = 0xff;
1582 		tq.d_id.b.domain = 0xff;
1583 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1584 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1585 		    MANAGEMENT_SERVER_LOOP_ID);
1586 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1587 		if (rval != QL_SUCCESS) {
1588 			EL(ha, "failed, server login\n");
1589 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1590 			cmd->ResponseLen = 0;
1591 			return;
1592 		} else {
1593 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1594 		}
1595 	}
1596 
1597 	QL_PRINT_9(ha, "cmd\n");
1598 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1599 
1600 	/* Allocate a DMA Memory Descriptor */
1601 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1602 	if (dma_mem == NULL) {
1603 		EL(ha, "failed, kmem_zalloc\n");
1604 		cmd->Status = EXT_STATUS_NO_MEMORY;
1605 		cmd->ResponseLen = 0;
1606 		return;
1607 	}
1608 	/* Determine maximum buffer size. */
1609 	if (cmd->RequestLen < cmd->ResponseLen) {
1610 		pld_byte_cnt = cmd->ResponseLen;
1611 	} else {
1612 		pld_byte_cnt = cmd->RequestLen;
1613 	}
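	/*
	 * A single DMA buffer sized to the larger of the two lengths is
	 * shared below by both the command and response data segments of
	 * the CT passthrough IOCB.
	 */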
1614 
1615 	/* Allocate command block. */
1616 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1617 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1618 	if (pkt == NULL) {
1619 		EL(ha, "failed, kmem_zalloc\n");
1620 		cmd->Status = EXT_STATUS_NO_MEMORY;
1621 		cmd->ResponseLen = 0;
1622 		return;
1623 	}
1624 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1625 
1626 	/* Get command payload data. */
1627 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1628 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1629 		EL(ha, "failed, get_buffer_data\n");
1630 		kmem_free(pkt, pkt_size);
1631 		cmd->Status = EXT_STATUS_COPY_ERR;
1632 		cmd->ResponseLen = 0;
1633 		return;
1634 	}
1635 
1636 	/* Get DMA memory for the IOCB */
1637 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1638 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1639 		cmn_err(CE_WARN, "%sDMA memory "
1640 		    "alloc failed", QL_NAME);
1641 		kmem_free(pkt, pkt_size);
1642 		kmem_free(dma_mem, sizeof (dma_mem_t));
1643 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1644 		cmd->ResponseLen = 0;
1645 		return;
1646 	}
1647 
1648 	/* Copy outgoing payload data to IOCB DMA buffer. */
1649 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1650 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1651 
1652 	/* Sync IOCB DMA buffer. */
1653 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1654 	    DDI_DMA_SYNC_FORDEV);
1655 
1656 	/*
1657 	 * Setup IOCB
1658 	 */
1659 	ct = (ql_ct_iu_preamble_t *)pld;
1660 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
1661 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1662 		pkt->ms24.entry_count = 1;
1663 
1664 		pkt->ms24.vp_index = ha->vp_index;
1665 
1666 		/* Set loop ID */
1667 		pkt->ms24.n_port_hdl = (uint16_t)
1668 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1669 		    LE_16(SNS_24XX_HDL) :
1670 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1671 
1672 		/* Set ISP command timeout. */
1673 		pkt->ms24.timeout = LE_16(120);
1674 
1675 		/* Set cmd/response data segment counts. */
1676 		pkt->ms24.cmd_dseg_count = LE_16(1);
1677 		pkt->ms24.resp_dseg_count = LE_16(1);
1678 
1679 		/* Load ct cmd byte count. */
1680 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1681 
1682 		/* Load ct rsp byte count. */
1683 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1684 
1685 		long_ptr = (uint32_t *)&pkt->ms24.dseg;
1686 
1687 		/* Load MS command entry data segments. */
1688 		*long_ptr++ = (uint32_t)
1689 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1690 		*long_ptr++ = (uint32_t)
1691 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1692 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1693 
1694 		/* Load MS response entry data segments. */
1695 		*long_ptr++ = (uint32_t)
1696 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1697 		*long_ptr++ = (uint32_t)
1698 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1699 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1700 
1701 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1702 		    sizeof (ql_mbx_iocb_t));
1703 
1704 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1705 		if (comp_status == CS_DATA_UNDERRUN) {
1706 			if ((BE_16(ct->max_residual_size)) == 0) {
1707 				comp_status = CS_COMPLETE;
1708 			}
1709 		}
1710 
1711 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1712 		    0) {
1713 			EL(ha, "failed, I/O timeout or "
1714 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1715 			    pkt->sts24.entry_status,
1716 			    pkt->sts24.scsi_status_l, rval);
1717 			kmem_free(pkt, pkt_size);
1718 			ql_free_dma_resource(ha, dma_mem);
1719 			kmem_free(dma_mem, sizeof (dma_mem_t));
1720 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1721 			cmd->ResponseLen = 0;
1722 			return;
1723 		}
1724 	} else {
1725 		pkt->ms.entry_type = MS_TYPE;
1726 		pkt->ms.entry_count = 1;
1727 
1728 		/* Set loop ID */
1729 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1730 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1731 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1732 			pkt->ms.loop_id_l = LSB(loop_id);
1733 			pkt->ms.loop_id_h = MSB(loop_id);
1734 		} else {
1735 			pkt->ms.loop_id_h = LSB(loop_id);
1736 		}
1737 
1738 		/* Set ISP command timeout. */
1739 		pkt->ms.timeout = LE_16(120);
1740 
1741 		/* Set data segment counts. */
1742 		pkt->ms.cmd_dseg_count_l = 1;
1743 		pkt->ms.total_dseg_count = LE_16(2);
1744 
1745 		/* Response total byte count. */
1746 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1747 		pkt->ms.dseg[1].length = LE_32(cmd->ResponseLen);
1748 
1749 		/* Command total byte count. */
1750 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1751 		pkt->ms.dseg[0].length = LE_32(cmd->RequestLen);
1752 
1753 		/* Load command/response data segments. */
1754 		pkt->ms.dseg[0].address[0] = (uint32_t)
1755 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1756 		pkt->ms.dseg[0].address[1] = (uint32_t)
1757 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1758 		pkt->ms.dseg[1].address[0] = (uint32_t)
1759 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1760 		pkt->ms.dseg[1].address[1] = (uint32_t)
1761 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1762 
1763 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1764 		    sizeof (ql_mbx_iocb_t));
1765 
1766 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1767 		if (comp_status == CS_DATA_UNDERRUN) {
1768 			if ((BE_16(ct->max_residual_size)) == 0) {
1769 				comp_status = CS_COMPLETE;
1770 			}
1771 		}
1772 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1773 			EL(ha, "failed, I/O timeout or "
1774 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1775 			kmem_free(pkt, pkt_size);
1776 			ql_free_dma_resource(ha, dma_mem);
1777 			kmem_free(dma_mem, sizeof (dma_mem_t));
1778 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1779 			cmd->ResponseLen = 0;
1780 			return;
1781 		}
1782 	}
1783 
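	/*
	 * Both the command and response data segments above reference the
	 * same DMA buffer: the firmware consumes the CT request from it and
	 * then writes the CT response back over it, so a single payload
	 * buffer (pld) is sufficient.
	 */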
1784 	/* Sync incoming DMA buffer. */
1785 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1786 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1787 	/* Copy incoming DMA data. */
1788 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1789 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1790 	    DDI_DEV_AUTOINCR);
1791 
1792 	/* Copy response payload from DMA buffer to application. */
1793 	if (cmd->ResponseLen != 0) {
1794 		QL_PRINT_9(ha, "ResponseLen=%d\n",
1795 		    cmd->ResponseLen);
1796 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1797 
1798 		/* Send response payload. */
1799 		if (ql_send_buffer_data(pld,
1800 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1801 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1802 			EL(ha, "failed, send_buffer_data\n");
1803 			cmd->Status = EXT_STATUS_COPY_ERR;
1804 			cmd->ResponseLen = 0;
1805 		}
1806 	}
1807 
1808 	kmem_free(pkt, pkt_size);
1809 	ql_free_dma_resource(ha, dma_mem);
1810 	kmem_free(dma_mem, sizeof (dma_mem_t));
1811 
1812 	QL_PRINT_9(ha, "done\n");
1813 }
1814 
1815 /*
1816  * ql_aen_reg
1817  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1818  *
1819  * Input:
1820  *	ha:	adapter state pointer.
1821  *	cmd:	EXT_IOCTL cmd struct pointer.
1822  *	mode:	flags.
1823  *
1824  * Returns:
1825  *	None, request status indicated in cmd->Status.
1826  *
1827  * Context:
1828  *	Kernel context.
1829  */
1830 static void
1831 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1832 {
1833 	EXT_REG_AEN	reg_struct;
1834 	int		rval = 0;
1835 	ql_xioctl_t	*xp = ha->xioctl;
1836 
1837 	QL_PRINT_9(ha, "started\n");
1838 
1839 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1840 	    cmd->RequestLen, mode);
1841 
1842 	if (rval == 0) {
1843 		if (reg_struct.Enable) {
1844 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1845 		} else {
1846 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1847 			/* Empty the queue. */
1848 			INTR_LOCK(ha);
1849 			xp->aen_q_head = 0;
1850 			xp->aen_q_tail = 0;
1851 			INTR_UNLOCK(ha);
1852 		}
1853 		QL_PRINT_9(ha, "done\n");
1854 	} else {
1855 		cmd->Status = EXT_STATUS_COPY_ERR;
1856 		EL(ha, "failed, ddi_copyin\n");
1857 	}
1858 }
1859 
1860 /*
1861  * ql_aen_get
1862  *	IOCTL management server Asynchronous Event Record Transfer.
1863  *
1864  * Input:
1865  *	ha:	adapter state pointer.
1866  *	cmd:	EXT_IOCTL cmd struct pointer.
1867  *	mode:	flags.
1868  *
1869  * Returns:
1870  *	None, request status indicated in cmd->Status.
1871  *
1872  * Context:
1873  *	Kernel context.
1874  */
1875 static void
1876 ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1877 {
1878 	uint32_t	out_size;
1879 	EXT_ASYNC_EVENT	*tmp_q;
1880 	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
1881 	uint8_t		i;
1882 	uint8_t		queue_cnt;
1883 	uint8_t		request_cnt;
1884 	ql_xioctl_t	*xp = ha->xioctl;
1885 
1886 	QL_PRINT_9(ha, "started\n");
1887 
1888 	/* Compute the number of events that can be returned */
1889 	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));
1890 
1891 	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
1892 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1893 		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
1894 		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
1895 		    "Len=%xh\n", request_cnt);
1896 		cmd->ResponseLen = 0;
1897 		return;
1898 	}
1899 
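	/*
	 * The queue is drained in two steps: entries are copied to the local
	 * aen[] array and cleared while INTR_LOCK is held, then the lock is
	 * dropped before the data is copied out, so ddi_copyout() is never
	 * called with the interrupt lock held.
	 */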
1900 	/* 1st: Make a local copy of the entire queue content. */
1901 	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1902 	queue_cnt = 0;
1903 
1904 	INTR_LOCK(ha);
1905 	i = xp->aen_q_head;
1906 
1907 	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
1908 		if (tmp_q[i].AsyncEventCode != 0) {
1909 			bcopy(&tmp_q[i], &aen[queue_cnt],
1910 			    sizeof (EXT_ASYNC_EVENT));
1911 			queue_cnt++;
1912 			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
1913 		}
1914 		if (i == xp->aen_q_tail) {
1915 			/* done. */
1916 			break;
1917 		}
1918 		i++;
1919 		if (i == EXT_DEF_MAX_AEN_QUEUE) {
1920 			i = 0;
1921 		}
1922 	}
1923 
1924 	/* Empty the queue. */
1925 	xp->aen_q_head = 0;
1926 	xp->aen_q_tail = 0;
1927 
1928 	INTR_UNLOCK(ha);
1929 
1930 	/* 2nd: Now transfer the queue content to user buffer */
1931 	/* Copy the entire queue to user's buffer. */
1932 	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
1933 	if (queue_cnt == 0) {
1934 		cmd->ResponseLen = 0;
1935 	} else if (ddi_copyout((void *)&aen[0],
1936 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1937 	    out_size, mode) != 0) {
1938 		cmd->Status = EXT_STATUS_COPY_ERR;
1939 		cmd->ResponseLen = 0;
1940 		EL(ha, "failed, ddi_copyout\n");
1941 	} else {
1942 		cmd->ResponseLen = out_size;
1943 		QL_PRINT_9(ha, "done\n");
1944 	}
1945 }
1946 
1947 /*
1948  * ql_enqueue_aen
1949  *
1950  * Input:
1951  *	ha:		adapter state pointer.
1952  *	event_code:	async event code of the event to add to queue.
1953  *	payload:	event payload for the queue.
1954  *	INTR_LOCK must be already obtained.
1955  *
1956  * Context:
1957  *	Interrupt or Kernel context, no mailbox commands allowed.
1958  */
1959 void
1960 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1961 {
1962 	uint8_t			new_entry;	/* index to current entry */
1963 	uint16_t		*mbx;
1964 	EXT_ASYNC_EVENT		*aen_queue;
1965 	ql_xioctl_t		*xp = ha->xioctl;
1966 
1967 	QL_PRINT_9(ha, "started, event_code=%d\n",
1968 	    event_code);
1969 
1970 	if (xp == NULL) {
1971 		QL_PRINT_9(ha, "no context\n");
1972 		return;
1973 	}
1974 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1975 
1976 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != 0) {
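	/*
	 * The AEN queue is a fixed ring of EXT_DEF_MAX_AEN_QUEUE entries.
	 * If the slot at the tail is still occupied the ring is full; the
	 * tail advances and, if it catches up with the head, the head also
	 * advances so the oldest entry is overwritten.
	 */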
1977 		/* Need to change queue pointers to make room. */
1978 
1979 		/* Increment tail for adding new entry. */
1980 		xp->aen_q_tail++;
1981 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1982 			xp->aen_q_tail = 0;
1983 		}
1984 		if (xp->aen_q_head == xp->aen_q_tail) {
1985 			/*
1986 			 * We're overwriting the oldest entry, so need to
1987 			 * update the head pointer.
1988 			 */
1989 			xp->aen_q_head++;
1990 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1991 				xp->aen_q_head = 0;
1992 			}
1993 		}
1994 	}
1995 
1996 	new_entry = xp->aen_q_tail;
1997 	aen_queue[new_entry].AsyncEventCode = event_code;
1998 
1999 	/* Update payload */
2000 	if (payload != NULL) {
2001 		switch (event_code) {
2002 		case MBA_LIP_OCCURRED:
2003 		case MBA_LOOP_UP:
2004 		case MBA_LOOP_DOWN:
2005 		case MBA_LIP_F8:
2006 		case MBA_LIP_RESET:
2007 		case MBA_PORT_UPDATE:
2008 			break;
2009 		case MBA_RSCN_UPDATE:
2010 			mbx = (uint16_t *)payload;
2011 			/* al_pa */
2012 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
2013 			    LSB(mbx[2]);
2014 			/* area */
2015 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
2016 			    MSB(mbx[2]);
2017 			/* domain */
2018 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
2019 			    LSB(mbx[1]);
2020 			/* save in big endian */
2021 			BIG_ENDIAN_24(&aen_queue[new_entry].
2022 			    Payload.RSCN.RSCNInfo[0]);
2023 
2024 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
2025 			    MSB(mbx[1]);
2026 
2027 			break;
2028 		default:
2029 			/* Not supported */
2030 			EL(ha, "failed, event code not supported=%xh\n",
2031 			    event_code);
2032 			aen_queue[new_entry].AsyncEventCode = 0;
2033 			break;
2034 		}
2035 	}
2036 
2037 	QL_PRINT_9(ha, "done\n");
2038 }
2039 
2040 /*
2041  * ql_scsi_passthru
2042  *	IOCTL SCSI passthrough.
2043  *
2044  * Input:
2045  *	ha:	adapter state pointer.
2046  *	cmd:	User space SCSI command pointer.
2047  *	mode:	flags.
2048  *
2049  * Returns:
2050  *	None, request status indicated in cmd->Status.
2051  *
2052  * Context:
2053  *	Kernel context.
2054  */
2055 static void
2056 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2057 {
2058 	ql_mbx_iocb_t		*pkt;
2059 	ql_mbx_data_t		mr;
2060 	dma_mem_t		*dma_mem;
2061 	caddr_t			pld;
2062 	uint32_t		pkt_size, pld_size;
2063 	uint16_t		qlnt, retries, cnt, cnt2;
2064 	uint8_t			*name;
2065 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
2066 	EXT_SCSI_PASSTHRU	*usp_req;
2067 	int			rval;
2068 	union _passthru {
2069 		EXT_SCSI_PASSTHRU	sp_cmd;
2070 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
2071 	} pt_req;		/* Passthru request */
2072 	uint32_t		status, sense_sz = 0;
2073 	ql_tgt_t		*tq = NULL;
2074 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
2075 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
2076 
2077 	/* SCSI request struct for SCSI passthrough IOs. */
2078 	struct {
2079 		uint16_t	lun;
2080 		uint16_t	sense_length;	/* Sense buffer size */
2081 		size_t		resid;		/* Residual */
2082 		uint8_t		*cdbp;		/* Requestor's CDB */
2083 		uint8_t		*u_sense;	/* Requestor's sense buffer */
2084 		uint8_t		cdb_len;	/* Requestor's CDB length */
2085 		uint8_t		direction;
2086 	} scsi_req;
2087 
2088 	struct {
2089 		uint8_t		*rsp_info;
2090 		uint8_t		*req_sense_data;
2091 		uint32_t	residual_length;
2092 		uint32_t	rsp_info_length;
2093 		uint32_t	req_sense_length;
2094 		uint16_t	comp_status;
2095 		uint8_t		state_flags_l;
2096 		uint8_t		state_flags_h;
2097 		uint8_t		scsi_status_l;
2098 		uint8_t		scsi_status_h;
2099 	} sts;
2100 
2101 	QL_PRINT_9(ha, "started\n");
2102 
2103 	/* Verify Sub Code and set pld_size to the needed request size. */
2104 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2105 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
2106 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2107 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2108 	} else {
2109 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2110 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2111 		cmd->ResponseLen = 0;
2112 		return;
2113 	}
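	/*
	 * At this point pld_size is the size of the passthru request
	 * structure; it is reused further down as the size of the data
	 * payload once the transfer direction and cmd->ResponseLen are
	 * known.
	 */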
2114 
2115 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2116 	if (dma_mem == NULL) {
2117 		EL(ha, "failed, kmem_zalloc\n");
2118 		cmd->Status = EXT_STATUS_NO_MEMORY;
2119 		cmd->ResponseLen = 0;
2120 		return;
2121 	}
2122 	/*  Verify the size of and copy in the passthru request structure. */
2123 	if (cmd->RequestLen != pld_size) {
2124 		/* Return error */
2125 		EL(ha, "failed, RequestLen != pld_size, is=%xh, expected=%xh\n",
2126 		    cmd->RequestLen, pld_size);
2127 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2128 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2129 		cmd->ResponseLen = 0;
2130 		return;
2131 	}
2132 
2133 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2134 	    pld_size, mode) != 0) {
2135 		EL(ha, "failed, ddi_copyin\n");
2136 		cmd->Status = EXT_STATUS_COPY_ERR;
2137 		cmd->ResponseLen = 0;
2138 		return;
2139 	}
2140 
2141 	/*
2142 	 * Find the fc_port from the SCSI PASSTHRU structure and fill in
2143 	 * the scsi_req request data structure.
2144 	 */
2145 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2146 		scsi_req.lun = sp_req->TargetAddr.Lun;
2147 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2148 		scsi_req.cdbp = &sp_req->Cdb[0];
2149 		scsi_req.cdb_len = sp_req->CdbLength;
2150 		scsi_req.direction = sp_req->Direction;
2151 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2152 		scsi_req.u_sense = &usp_req->SenseData[0];
2153 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2154 
2155 		qlnt = QLNT_PORT;
2156 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2157 		QL_PRINT_9(ha, "SubCode=%xh, Target=%lld\n",
2158 		    cmd->SubCode, sp_req->TargetAddr.Target);
2159 		tq = ql_find_port(ha, name, qlnt);
2160 	} else {
2161 		/*
2162 		 * Must be FC PASSTHRU, verified above.
2163 		 */
2164 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2165 			qlnt = QLNT_PORT;
2166 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2167 			QL_PRINT_9(ha, "SubCode=%xh, "
2168 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2169 			    cmd->SubCode, name[0], name[1],
2170 			    name[2], name[3], name[4], name[5], name[6],
2171 			    name[7]);
2172 			tq = ql_find_port(ha, name, qlnt);
2173 		} else if (fc_req->FCScsiAddr.DestType ==
2174 		    EXT_DEF_DESTTYPE_WWNN) {
2175 			qlnt = QLNT_NODE;
2176 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2177 			QL_PRINT_9(ha, "SubCode=%xh, "
2178 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2179 			    cmd->SubCode, name[0], name[1],
2180 			    name[2], name[3], name[4], name[5], name[6],
2181 			    name[7]);
2182 			tq = ql_find_port(ha, name, qlnt);
2183 		} else if (fc_req->FCScsiAddr.DestType ==
2184 		    EXT_DEF_DESTTYPE_PORTID) {
2185 			qlnt = QLNT_PID;
2186 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2187 			QL_PRINT_9(ha, "SubCode=%xh, PID="
2188 			    "%02x%02x%02x\n", cmd->SubCode,
2189 			    name[0], name[1], name[2]);
2190 			tq = ql_find_port(ha, name, qlnt);
2191 		} else {
2192 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2193 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2194 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2195 			cmd->ResponseLen = 0;
2196 			return;
2197 		}
2198 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2199 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2200 		scsi_req.cdbp = &sp_req->Cdb[0];
2201 		scsi_req.cdb_len = sp_req->CdbLength;
2202 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2203 		scsi_req.u_sense = &ufc_req->SenseData[0];
2204 		scsi_req.direction = fc_req->Direction;
2205 	}
2206 
2207 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2208 		EL(ha, "failed, fc_port not found\n");
2209 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2210 		cmd->ResponseLen = 0;
2211 		return;
2212 	}
2213 
2214 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2215 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2216 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2217 		cmd->ResponseLen = 0;
2218 		return;
2219 	}
2220 
2221 	/* Allocate command block. */
2222 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2223 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2224 	    cmd->ResponseLen) {
2225 		pld_size = cmd->ResponseLen;
2226 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2227 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2228 		if (pkt == NULL) {
2229 			EL(ha, "failed, kmem_zalloc\n");
2230 			cmd->Status = EXT_STATUS_NO_MEMORY;
2231 			cmd->ResponseLen = 0;
2232 			return;
2233 		}
2234 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2235 
2236 		/* Get DMA memory for the IOCB */
2237 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2238 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2239 			cmn_err(CE_WARN, "%srequest queue DMA memory "
2240 			    "alloc failed", QL_NAME);
2241 			kmem_free(pkt, pkt_size);
2242 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2243 			cmd->ResponseLen = 0;
2244 			return;
2245 		}
2246 
2247 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2248 			scsi_req.direction = (uint8_t)
2249 			    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
2250 			    CF_RD : CF_DATA_IN | CF_STAG);
2251 		} else {
2252 			scsi_req.direction = (uint8_t)
2253 			    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
2254 			    CF_WR : CF_DATA_OUT | CF_STAG);
2255 			cmd->ResponseLen = 0;
2256 
2257 			/* Get command payload. */
2258 			if (ql_get_buffer_data(
2259 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2260 			    pld, pld_size, mode) != pld_size) {
2261 				EL(ha, "failed, get_buffer_data\n");
2262 				cmd->Status = EXT_STATUS_COPY_ERR;
2263 
2264 				kmem_free(pkt, pkt_size);
2265 				ql_free_dma_resource(ha, dma_mem);
2266 				kmem_free(dma_mem, sizeof (dma_mem_t));
2267 				return;
2268 			}
2269 
2270 			/* Copy out going data to DMA buffer. */
2271 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2272 			    (uint8_t *)dma_mem->bp, pld_size,
2273 			    DDI_DEV_AUTOINCR);
2274 
2275 			/* Sync DMA buffer. */
2276 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2277 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2278 		}
2279 	} else {
2280 		scsi_req.direction = (uint8_t)
2281 		    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ? 0 : CF_STAG);
2282 		cmd->ResponseLen = 0;
2283 
2284 		pkt_size = sizeof (ql_mbx_iocb_t);
2285 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2286 		if (pkt == NULL) {
2287 			EL(ha, "failed, kmem_zalloc-2\n");
2288 			cmd->Status = EXT_STATUS_NO_MEMORY;
2289 			return;
2290 		}
2291 		pld = NULL;
2292 		pld_size = 0;
2293 	}
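	/*
	 * scsi_req.direction now holds IOCB control flags rather than the
	 * ioctl direction value: CF_RD/CF_WR for 24xx firmware, or
	 * CF_DATA_IN/CF_DATA_OUT plus simple tag (CF_STAG) for earlier
	 * ISPs; no data-transfer flags are set when there is no payload.
	 */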
2294 
2295 	/* retries = ha->port_down_retry_count; */
2296 	retries = 1;
2297 	cmd->Status = EXT_STATUS_OK;
2298 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2299 
2300 	QL_PRINT_9(ha, "SCSI cdb\n");
2301 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2302 
2303 	do {
2304 		if (DRIVER_SUSPENDED(ha)) {
2305 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2306 			break;
2307 		}
2308 
2309 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2310 			uint64_t		lun_addr = 0;
2311 			fcp_ent_addr_t		*fcp_ent_addr = 0;
2312 
2313 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2314 			pkt->cmd24.entry_count = 1;
2315 
2316 			/* Set LUN number and address method */
2317 			lun_addr = ql_get_lun_addr(tq, scsi_req.lun);
2318 			fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
2319 
2320 			pkt->cmd24.fcp_lun[2] =
2321 			    lobyte(fcp_ent_addr->ent_addr_0);
2322 			pkt->cmd24.fcp_lun[3] =
2323 			    hibyte(fcp_ent_addr->ent_addr_0);
2324 			pkt->cmd24.fcp_lun[0] =
2325 			    lobyte(fcp_ent_addr->ent_addr_1);
2326 			pkt->cmd24.fcp_lun[1] =
2327 			    hibyte(fcp_ent_addr->ent_addr_1);
2328 			pkt->cmd24.fcp_lun[6] =
2329 			    lobyte(fcp_ent_addr->ent_addr_2);
2330 			pkt->cmd24.fcp_lun[7] =
2331 			    hibyte(fcp_ent_addr->ent_addr_2);
2332 			pkt->cmd24.fcp_lun[4] =
2333 			    lobyte(fcp_ent_addr->ent_addr_3);
2334 			pkt->cmd24.fcp_lun[5] =
2335 			    hibyte(fcp_ent_addr->ent_addr_3);
2336 
2337 			/* Set N_port handle */
2338 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2339 
2340 			/* Set VP Index */
2341 			pkt->cmd24.vp_index = ha->vp_index;
2342 
2343 			/* Set target ID */
2344 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2345 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2346 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2347 
2348 			/* Set ISP command timeout. */
2349 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2350 
2351 			/* Load SCSI CDB */
2352 			ddi_rep_put8(ha->req_q[0]->req_ring.acc_handle,
2353 			    scsi_req.cdbp, pkt->cmd24.scsi_cdb,
2354 			    scsi_req.cdb_len, DDI_DEV_AUTOINCR);
2355 			for (cnt = 0; cnt < MAX_CMDSZ;
2356 			    cnt = (uint16_t)(cnt + 4)) {
2357 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2358 				    + cnt, 4);
2359 			}
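			/*
			 * ql_chg_endian() above swaps each 4-byte group of
			 * the CDB so that it reaches the firmware in the
			 * expected 32-bit word order.
			 */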
2360 
2361 			/* Set tag queue control flags */
2362 			pkt->cmd24.task = TA_STAG;
2363 
2364 			if (pld_size) {
2365 				/* Set transfer direction. */
2366 				pkt->cmd24.control_flags = scsi_req.direction;
2367 
2368 				/* Set data segment count. */
2369 				pkt->cmd24.dseg_count = LE_16(1);
2370 
2371 				/* Load total byte count. */
2372 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2373 
2374 				/* Load data descriptor. */
2375 				pkt->cmd24.dseg.address[0] = (uint32_t)
2376 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2377 				pkt->cmd24.dseg.address[1] = (uint32_t)
2378 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2379 				pkt->cmd24.dseg.length = LE_32(pld_size);
2380 			}
2381 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2382 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2383 			pkt->cmd3.entry_count = 1;
2384 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2385 				pkt->cmd3.target_l = LSB(tq->loop_id);
2386 				pkt->cmd3.target_h = MSB(tq->loop_id);
2387 			} else {
2388 				pkt->cmd3.target_h = LSB(tq->loop_id);
2389 			}
2390 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2391 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2392 			pkt->cmd3.control_flags_l = scsi_req.direction;
2393 			pkt->cmd3.timeout = LE_16(15);
2394 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2395 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2396 			}
2397 			if (pld_size) {
2398 				pkt->cmd3.dseg_count = LE_16(1);
2399 				pkt->cmd3.byte_count = LE_32(pld_size);
2400 				pkt->cmd3.dseg[0].address[0] = (uint32_t)
2401 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2402 				pkt->cmd3.dseg[0].address[1] = (uint32_t)
2403 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2404 				pkt->cmd3.dseg[0].length = LE_32(pld_size);
2405 			}
2406 		} else {
2407 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2408 			pkt->cmd.entry_count = 1;
2409 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2410 				pkt->cmd.target_l = LSB(tq->loop_id);
2411 				pkt->cmd.target_h = MSB(tq->loop_id);
2412 			} else {
2413 				pkt->cmd.target_h = LSB(tq->loop_id);
2414 			}
2415 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2416 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2417 			pkt->cmd.control_flags_l = scsi_req.direction;
2418 			pkt->cmd.timeout = LE_16(15);
2419 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2420 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2421 			}
2422 			if (pld_size) {
2423 				pkt->cmd.dseg_count = LE_16(1);
2424 				pkt->cmd.byte_count = LE_32(pld_size);
2425 				pkt->cmd.dseg[0].address = (uint32_t)
2426 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2427 				pkt->cmd.dseg[0].length = LE_32(pld_size);
2428 			}
2429 		}
2430 		/* Go issue command and wait for completion. */
2431 		QL_PRINT_9(ha, "request pkt\n");
2432 		QL_DUMP_9(pkt, 8, pkt_size);
2433 
2434 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2435 
2436 		if (pld_size) {
2437 			/* Sync incoming DMA buffer. */
2438 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2439 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2440 			/* Copy incoming DMA data. */
2441 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2442 			    (uint8_t *)dma_mem->bp, pld_size,
2443 			    DDI_DEV_AUTOINCR);
2444 		}
2445 
2446 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2447 			pkt->sts24.entry_status = (uint8_t)
2448 			    (pkt->sts24.entry_status & 0x3c);
2449 		} else {
2450 			pkt->sts.entry_status = (uint8_t)
2451 			    (pkt->sts.entry_status & 0x7e);
2452 		}
2453 
2454 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2455 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2456 			    pkt->sts.entry_status, tq->d_id.b24);
2457 			status = QL_FUNCTION_PARAMETER_ERROR;
2458 		}
2459 
2460 		sts.comp_status = (uint16_t)
2461 		    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
2462 		    LE_16(pkt->sts24.comp_status) :
2463 		    LE_16(pkt->sts.comp_status));
2464 
2465 		/*
2466 		 * We have verified as much of the request as we can so far.
2467 		 * Now we need to verify our ability to actually issue
2468 		 * the CDB.
2469 		 */
2470 		if (DRIVER_SUSPENDED(ha)) {
2471 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2472 			break;
2473 		} else if (status == QL_SUCCESS &&
2474 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2475 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2476 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2477 			if (tq->flags & TQF_FABRIC_DEVICE) {
2478 				rval = ql_login_fport(ha, tq, tq->loop_id,
2479 				    LFF_NO_PLOGI, &mr);
2480 				if (rval != QL_SUCCESS) {
2481 					EL(ha, "failed, login_fport=%xh, "
2482 					    "d_id=%xh\n", rval, tq->d_id.b24);
2483 				}
2484 			} else {
2485 				rval = ql_login_lport(ha, tq, tq->loop_id,
2486 				    LLF_NONE);
2487 				if (rval != QL_SUCCESS) {
2488 					EL(ha, "failed, login_lport=%xh, "
2489 					    "d_id=%xh\n", rval, tq->d_id.b24);
2490 				}
2491 			}
2492 		} else {
2493 			break;
2494 		}
2495 
2496 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2497 
2498 	} while (retries--);
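	/*
	 * The loop above reissues the command at most once: if the first
	 * attempt completes with PORT_LOGGED_OUT or PORT_UNAVAILABLE, a
	 * fabric or local-loop login is retried and the IOCB is rebuilt
	 * and sent again.
	 */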
2499 
2500 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2501 		/* Cannot issue command now, maybe later */
2502 		EL(ha, "failed, suspended\n");
2503 		kmem_free(pkt, pkt_size);
2504 		ql_free_dma_resource(ha, dma_mem);
2505 		kmem_free(dma_mem, sizeof (dma_mem_t));
2506 		cmd->Status = EXT_STATUS_SUSPENDED;
2507 		cmd->ResponseLen = 0;
2508 		return;
2509 	}
2510 
2511 	if (status != QL_SUCCESS) {
2512 		/* Command error */
2513 		EL(ha, "failed, I/O\n");
2514 		kmem_free(pkt, pkt_size);
2515 		ql_free_dma_resource(ha, dma_mem);
2516 		kmem_free(dma_mem, sizeof (dma_mem_t));
2517 		cmd->Status = EXT_STATUS_ERR;
2518 		cmd->DetailStatus = status;
2519 		cmd->ResponseLen = 0;
2520 		return;
2521 	}
2522 
2523 	/* Setup status. */
2524 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2525 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2526 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2527 
2528 		/* Setup residuals. */
2529 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2530 
2531 		/* Setup state flags. */
2532 		sts.state_flags_l = pkt->sts24.state_flags_l;
2533 		sts.state_flags_h = pkt->sts24.state_flags_h;
2534 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2535 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2536 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2537 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2538 		} else {
2539 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2540 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2541 			    SF_GOT_STATUS);
2542 		}
2543 		if (scsi_req.direction & CF_WR) {
2544 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2545 			    SF_DATA_OUT);
2546 		} else if (scsi_req.direction & CF_RD) {
2547 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2548 			    SF_DATA_IN);
2549 		}
2550 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2551 
2552 		/* Setup FCP response info. */
2553 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2554 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2555 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2556 		for (cnt = 0; cnt < sts.rsp_info_length;
2557 		    cnt = (uint16_t)(cnt + 4)) {
2558 			ql_chg_endian(sts.rsp_info + cnt, 4);
2559 		}
2560 
2561 		/* Setup sense data. */
2562 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2563 			sts.req_sense_length =
2564 			    LE_32(pkt->sts24.fcp_sense_length);
2565 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2566 			    SF_ARQ_DONE);
2567 		} else {
2568 			sts.req_sense_length = 0;
2569 		}
2570 		sts.req_sense_data =
2571 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2572 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2573 		    (uintptr_t)sts.req_sense_data);
2574 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2575 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2576 		}
2577 	} else {
2578 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2579 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2580 
2581 		/* Setup residuals. */
2582 		sts.residual_length = LE_32(pkt->sts.residual_length);
2583 
2584 		/* Setup state flags. */
2585 		sts.state_flags_l = pkt->sts.state_flags_l;
2586 		sts.state_flags_h = pkt->sts.state_flags_h;
2587 
2588 		/* Setup FCP response info. */
2589 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2590 		    LE_16(pkt->sts.rsp_info_length) : 0;
2591 		sts.rsp_info = &pkt->sts.rsp_info[0];
2592 
2593 		/* Setup sense data. */
2594 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2595 		    LE_16(pkt->sts.req_sense_length) : 0;
2596 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2597 	}
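	/*
	 * For 24xx status IOCBs the FCP response info and sense bytes share
	 * rsp_sense_data[] (the sense data follows the response info) and
	 * are byte-swapped in 4-byte groups above; earlier ISPs return them
	 * in separate fixed-size fields.
	 */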
2598 
2599 	QL_PRINT_9(ha, "response pkt\n");
2600 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2601 
2602 	switch (sts.comp_status) {
2603 	case CS_INCOMPLETE:
2604 	case CS_ABORTED:
2605 	case CS_DEVICE_UNAVAILABLE:
2606 	case CS_PORT_UNAVAILABLE:
2607 	case CS_PORT_LOGGED_OUT:
2608 	case CS_PORT_CONFIG_CHG:
2609 	case CS_PORT_BUSY:
2610 	case CS_LOOP_DOWN_ABORT:
2611 		cmd->Status = EXT_STATUS_BUSY;
2612 		break;
2613 	case CS_RESET:
2614 	case CS_QUEUE_FULL:
2615 		cmd->Status = EXT_STATUS_ERR;
2616 		break;
2617 	case CS_TIMEOUT:
2618 		cmd->Status = EXT_STATUS_ERR;
2619 		break;
2620 	case CS_DATA_OVERRUN:
2621 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2622 		break;
2623 	case CS_DATA_UNDERRUN:
2624 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2625 		break;
2626 	}
2627 
2628 	/*
2629 	 * For non-data transfer commands, fix the transfer counts.
2630 	 */
2631 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2632 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2633 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2634 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2635 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2636 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2637 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2638 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2639 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2640 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2641 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2642 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2643 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2644 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2645 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2646 		/*
2647 		 * Non-data transfer command; clear the sts_entry residual
2648 		 * length.
2649 		 */
2650 		sts.residual_length = 0;
2651 		cmd->ResponseLen = 0;
2652 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2653 			sts.comp_status = CS_COMPLETE;
2654 			cmd->Status = EXT_STATUS_OK;
2655 		}
2656 	} else {
2657 		cmd->ResponseLen = pld_size;
2658 	}
2659 
2660 	/* Correct ISP completion status */
2661 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2662 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2663 		QL_PRINT_9(ha,
2664 		    "Correct completion\n");
2665 		scsi_req.resid = 0;
2666 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2667 		QL_PRINT_9(ha,
2668 		    "Correct UNDERRUN\n");
2669 		scsi_req.resid = sts.residual_length;
2670 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2671 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2672 
2673 			cmd->ResponseLen = (uint32_t)
2674 			    (pld_size - scsi_req.resid);
2675 		} else {
2676 			EL(ha, "failed, Transfer ERROR\n");
2677 			cmd->Status = EXT_STATUS_ERR;
2678 			cmd->ResponseLen = 0;
2679 		}
2680 	} else {
2681 		QL_PRINT_9(ha, "error d_id=%xh, comp_status=%xh, "
2682 		    "scsi_status_h=%xh, scsi_status_l=%xh\n",
2683 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2684 		    sts.scsi_status_l);
2685 
2686 		scsi_req.resid = pld_size;
2687 		/*
2688 		 * Handle residual count on SCSI check
2689 		 * condition.
2690 		 *
2691 		 * - If Residual Under / Over is set, use the
2692 		 *   Residual Transfer Length field in IOCB.
2693 		 * - If Residual Under / Over is not set, and
2694 		 *   Transferred Data bit is set in State Flags
2695 		 *   field of IOCB, report residual value of 0
2696 		 *   (you may want to do this for tape
2697 		 *   Write-type commands only). This takes care
2698 		 *   of logical end of tape problem and does
2699 		 *   not break Unit Attention.
2700 		 * - If Residual Under / Over is not set, and
2701 		 *   Transferred Data bit is not set in State
2702 		 *   Flags, report residual value equal to
2703 		 *   original data transfer length.
2704 		 */
2705 		if (sts.scsi_status_l & STATUS_CHECK) {
2706 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2707 			cmd->DetailStatus = sts.scsi_status_l;
2708 			if (sts.scsi_status_h &
2709 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2710 				scsi_req.resid = sts.residual_length;
2711 			} else if (sts.state_flags_h &
2712 			    STATE_XFERRED_DATA) {
2713 				scsi_req.resid = 0;
2714 			}
2715 		}
2716 	}
2717 
2718 	if (sts.scsi_status_l & STATUS_CHECK &&
2719 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2720 	    sts.req_sense_length) {
2721 		/*
2722 		 * Check condition with valid sense data flag set and sense
2723 		 * length != 0
2724 		 */
2725 		if (sts.req_sense_length > scsi_req.sense_length) {
2726 			sense_sz = scsi_req.sense_length;
2727 		} else {
2728 			sense_sz = sts.req_sense_length;
2729 		}
2730 
2731 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2732 		    tq->d_id.b24);
2733 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2734 
2735 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2736 		    (size_t)sense_sz, mode) != 0) {
2737 			EL(ha, "failed, request sense ddi_copyout\n");
2738 		}
2739 
2740 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2741 		cmd->DetailStatus = sts.scsi_status_l;
2742 	}
2743 
2744 	/* Copy response payload from DMA buffer to application. */
2745 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2746 	    cmd->ResponseLen != 0) {
2747 		QL_PRINT_9(ha, "Data Return resid=%lu, "
2748 		    "byte_count=%u, ResponseLen=%xh\n",
2749 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2750 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2751 
2752 		/* Send response payload. */
2753 		if (ql_send_buffer_data(pld,
2754 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2755 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2756 			EL(ha, "failed, send_buffer_data\n");
2757 			cmd->Status = EXT_STATUS_COPY_ERR;
2758 			cmd->ResponseLen = 0;
2759 		}
2760 	}
2761 
2762 	if (cmd->Status != EXT_STATUS_OK) {
2763 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2764 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2765 	} else {
2766 		/*EMPTY*/
2767 		QL_PRINT_9(ha, "done, ResponseLen=%d\n",
2768 		    cmd->ResponseLen);
2769 	}
2770 
2771 	kmem_free(pkt, pkt_size);
2772 	ql_free_dma_resource(ha, dma_mem);
2773 	kmem_free(dma_mem, sizeof (dma_mem_t));
2774 }
2775 
2776 /*
2777  * ql_wwpn_to_scsiaddr
2778  *
2779  * Input:
2780  *	ha:	adapter state pointer.
2781  *	cmd:	EXT_IOCTL cmd struct pointer.
2782  *	mode:	flags.
2783  *
2784  * Context:
2785  *	Kernel context.
2786  */
2787 static void
2788 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2789 {
2790 	int		status;
2791 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2792 	EXT_SCSI_ADDR	*tmp_addr;
2793 	ql_tgt_t	*tq;
2794 
2795 	QL_PRINT_9(ha, "started\n");
2796 
2797 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2798 		/* Return error */
2799 		EL(ha, "incorrect RequestLen\n");
2800 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2801 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2802 		return;
2803 	}
2804 
2805 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2806 	    cmd->RequestLen, mode);
2807 
2808 	if (status != 0) {
2809 		cmd->Status = EXT_STATUS_COPY_ERR;
2810 		EL(ha, "failed, ddi_copyin\n");
2811 		return;
2812 	}
2813 
2814 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2815 
2816 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2817 		/* no matching device */
2818 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2819 		EL(ha, "failed, device not found\n");
2820 		return;
2821 	}
2822 
2823 	/* Copy out the IDs found.  For now we can only return target ID. */
2824 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2825 
2826 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2827 
2828 	if (status != 0) {
2829 		cmd->Status = EXT_STATUS_COPY_ERR;
2830 		EL(ha, "failed, ddi_copyout\n");
2831 	} else {
2832 		cmd->Status = EXT_STATUS_OK;
2833 		QL_PRINT_9(ha, "done\n");
2834 	}
2835 }
2836 
2837 /*
2838  * ql_host_idx
2839  *	Gets host order index.
2840  *
2841  * Input:
2842  *	ha:	adapter state pointer.
2843  *	cmd:	EXT_IOCTL cmd struct pointer.
2844  *	mode:	flags.
2845  *
2846  * Returns:
2847  *	None, request status indicated in cmd->Status.
2848  *
2849  * Context:
2850  *	Kernel context.
2851  */
2852 static void
2853 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2854 {
2855 	uint16_t	idx;
2856 
2857 	QL_PRINT_9(ha, "started\n");
2858 
2859 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2860 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2861 		cmd->DetailStatus = sizeof (uint16_t);
2862 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2863 		cmd->ResponseLen = 0;
2864 		return;
2865 	}
2866 
2867 	idx = (uint16_t)ha->instance;
2868 
2869 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2870 	    sizeof (uint16_t), mode) != 0) {
2871 		cmd->Status = EXT_STATUS_COPY_ERR;
2872 		cmd->ResponseLen = 0;
2873 		EL(ha, "failed, ddi_copyout\n");
2874 	} else {
2875 		cmd->ResponseLen = sizeof (uint16_t);
2876 		QL_PRINT_9(ha, "done\n");
2877 	}
2878 }
2879 
2880 /*
2881  * ql_host_drvname
2882  *	Gets host driver name
2883  *
2884  * Input:
2885  *	ha:	adapter state pointer.
2886  *	cmd:	EXT_IOCTL cmd struct pointer.
2887  *	mode:	flags.
2888  *
2889  * Returns:
2890  *	None, request status indicated in cmd->Status.
2891  *
2892  * Context:
2893  *	Kernel context.
2894  */
2895 static void
2896 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2897 {
2898 
2899 	char		drvname[] = QL_NAME;
2900 	uint32_t	qlnamelen;
2901 
2902 	QL_PRINT_9(ha, "started\n");
2903 
2904 	qlnamelen = (uint32_t)(strlen(QL_NAME) + 1);
2905 
2906 	if (cmd->ResponseLen < qlnamelen) {
2907 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2908 		cmd->DetailStatus = qlnamelen;
2909 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2910 		    cmd->ResponseLen, qlnamelen);
2911 		cmd->ResponseLen = 0;
2912 		return;
2913 	}
2914 
2915 	if (ddi_copyout((void *)&drvname,
2916 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2917 	    qlnamelen, mode) != 0) {
2918 		cmd->Status = EXT_STATUS_COPY_ERR;
2919 		cmd->ResponseLen = 0;
2920 		EL(ha, "failed, ddi_copyout\n");
2921 	} else {
2922 		cmd->ResponseLen = qlnamelen - 1;
2923 	}
2924 
2925 	QL_PRINT_9(ha, "done\n");
2926 }
2927 
2928 /*
2929  * ql_read_nvram
2930  *	Get NVRAM contents.
2931  *
2932  * Input:
2933  *	ha:	adapter state pointer.
2934  *	cmd:	EXT_IOCTL cmd struct pointer.
2935  *	mode:	flags.
2936  *
2937  * Returns:
2938  *	None, request status indicated in cmd->Status.
2939  *
2940  * Context:
2941  *	Kernel context.
2942  */
2943 static void
2944 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2945 {
2946 
2947 	QL_PRINT_9(ha, "started\n");
2948 
2949 	if (cmd->ResponseLen < ha->nvram_cache->size) {
2950 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2951 		cmd->DetailStatus = ha->nvram_cache->size;
2952 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2953 		    cmd->ResponseLen);
2954 		cmd->ResponseLen = 0;
2955 		return;
2956 	}
2957 
2958 	/* Get NVRAM data. */
2959 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2960 	    mode) != 0) {
2961 		cmd->Status = EXT_STATUS_COPY_ERR;
2962 		cmd->ResponseLen = 0;
2963 		EL(ha, "failed, copy error\n");
2964 	} else {
2965 		cmd->ResponseLen = ha->nvram_cache->size;
2966 		QL_PRINT_9(ha, "done\n");
2967 	}
2968 }
2969 
2970 /*
2971  * ql_write_nvram
2972  *	Loads NVRAM contents.
2973  *
2974  * Input:
2975  *	ha:	adapter state pointer.
2976  *	cmd:	EXT_IOCTL cmd struct pointer.
2977  *	mode:	flags.
2978  *
2979  * Returns:
2980  *	None, request status indicated in cmd->Status.
2981  *
2982  * Context:
2983  *	Kernel context.
2984  */
2985 static void
2986 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2987 {
2988 
2989 	QL_PRINT_9(ha, "started\n");
2990 
2991 	if (cmd->RequestLen < ha->nvram_cache->size) {
2992 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2993 		cmd->DetailStatus = ha->nvram_cache->size;
2994 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2995 		    cmd->RequestLen);
2996 		return;
2997 	}
2998 
2999 	/* Load NVRAM data. */
3000 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
3001 	    mode) != 0) {
3002 		cmd->Status = EXT_STATUS_COPY_ERR;
3003 		EL(ha, "failed, copy error\n");
3004 	} else {
3005 		/*EMPTY*/
3006 		QL_PRINT_9(ha, "done\n");
3007 	}
3008 }
3009 
3010 /*
3011  * ql_write_vpd
3012  *	Loads VPD contents.
3013  *
3014  * Input:
3015  *	ha:	adapter state pointer.
3016  *	cmd:	EXT_IOCTL cmd struct pointer.
3017  *	mode:	flags.
3018  *
3019  * Returns:
3020  *	None, request status indicated in cmd->Status.
3021  *
3022  * Context:
3023  *	Kernel context.
3024  */
3025 static void
3026 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3027 {
3028 	int32_t		rval = 0;
3029 
3030 	QL_PRINT_9(ha, "started\n");
3031 
3032 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
3033 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
3034 		EL(ha, "failed, invalid request for HBA\n");
3035 		return;
3036 	}
3037 
3038 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
3039 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3040 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
3041 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
3042 		    cmd->RequestLen);
3043 		return;
3044 	}
3045 
3046 	/* Load VPD data. */
3047 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
3048 	    mode)) != 0) {
3049 		cmd->Status = EXT_STATUS_COPY_ERR;
3050 		cmd->DetailStatus = rval;
3051 		EL(ha, "failed, errno=%x\n", rval);
3052 	} else {
3053 		/*EMPTY*/
3054 		QL_PRINT_9(ha, "done\n");
3055 	}
3056 }
3057 
3058 /*
3059  * ql_read_vpd
3060  *	Dumps VPD contents.
3061  *
3062  * Input:
3063  *	ha:	adapter state pointer.
3064  *	cmd:	EXT_IOCTL cmd struct pointer.
3065  *	mode:	flags.
3066  *
3067  * Returns:
3068  *	None, request status indicated in cmd->Status.
3069  *
3070  * Context:
3071  *	Kernel context.
3072  */
3073 static void
3074 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3075 {
3076 	QL_PRINT_9(ha, "started\n");
3077 
3078 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
3079 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
3080 		EL(ha, "failed, invalid request for HBA\n");
3081 		return;
3082 	}
3083 
3084 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
3085 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3086 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
3087 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
3088 		    cmd->ResponseLen);
3089 		return;
3090 	}
3091 
3092 	/* Dump VPD data. */
3093 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
3094 	    mode)) != 0) {
3095 		cmd->Status = EXT_STATUS_COPY_ERR;
3096 		EL(ha, "failed,\n");
3097 	} else {
3098 		/*EMPTY*/
3099 		QL_PRINT_9(ha, "done\n");
3100 	}
3101 }
3102 
3103 /*
3104  * ql_get_fcache
3105  *	Dumps flash cache contents.
3106  *
3107  * Input:
3108  *	ha:	adapter state pointer.
3109  *	cmd:	EXT_IOCTL cmd struct pointer.
3110  *	mode:	flags.
3111  *
3112  * Returns:
3113  *	None, request status indicated in cmd->Status.
3114  *
3115  * Context:
3116  *	Kernel context.
3117  */
3118 static void
3119 ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3120 {
3121 	uint32_t	bsize, boff, types, cpsize, hsize;
3122 	ql_fcache_t	*fptr;
3123 
3124 	QL_PRINT_9(ha, "started\n");
3125 
3126 	if (ha->fcache == NULL) {
3127 		cmd->Status = EXT_STATUS_ERR;
3128 		EL(ha, "failed, adapter fcache not setup\n");
3129 		return;
3130 	}
3131 
3132 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
3133 		bsize = 100;
3134 	} else {
3135 		bsize = 400;
3136 	}
3137 
3138 	if (cmd->ResponseLen < bsize) {
3139 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3140 		cmd->DetailStatus = bsize;
3141 		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3142 		    bsize, cmd->ResponseLen);
3143 		return;
3144 	}
3145 
3146 	boff = 0;
3147 	bsize = 0;
3148 	fptr = ha->fcache;
3149 
3150 	/*
3151 	 * For backwards compatibility, get one of each image type
3152 	 */
3153 	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
3154 	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
3155 		/* Get the next image */
3156 		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {
3157 
3158 			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);
3159 
3160 			if (ddi_copyout(fptr->buf,
3161 			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3162 			    cpsize, mode) != 0) {
3163 				EL(ha, "ddicopy failed, done\n");
3164 				cmd->Status = EXT_STATUS_COPY_ERR;
3165 				cmd->DetailStatus = 0;
3166 				return;
3167 			}
3168 			boff += 100;
3169 			bsize += cpsize;
3170 			types &= ~(fptr->type);
3171 		}
3172 	}
3173 
3174 	/*
3175 	 * Get the firmware image -- it needs to be last in the
3176 	 * buffer at offset 300 for backwards compatibility. Also for
3177 	 * backwards compatibility, the pci header is stripped off.
3178 	 */
3179 	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {
3180 
3181 		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
3182 		if (hsize > fptr->buflen) {
3183 			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
3184 			    hsize, fptr->buflen);
3185 			cmd->Status = EXT_STATUS_COPY_ERR;
3186 			cmd->DetailStatus = 0;
3187 			return;
3188 		}
3189 
3190 		cpsize = ((fptr->buflen - hsize) < 100 ?
3191 		    fptr->buflen - hsize : 100);
3192 
3193 		if (ddi_copyout(fptr->buf + hsize,
3194 		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
3195 		    cpsize, mode) != 0) {
3196 			EL(ha, "fw ddicopy failed, done\n");
3197 			cmd->Status = EXT_STATUS_COPY_ERR;
3198 			cmd->DetailStatus = 0;
3199 			return;
3200 		}
3201 		bsize += 100;
3202 	}
3203 
3204 	cmd->Status = EXT_STATUS_OK;
3205 	cmd->DetailStatus = bsize;
3206 
3207 	QL_PRINT_9(ha, "done\n");
3208 }
3209 
3210 /*
3211  * ql_get_fcache_ex
3212  *	Dumps flash cache contents.
3213  *
3214  * Input:
3215  *	ha:	adapter state pointer.
3216  *	cmd:	EXT_IOCTL cmd struct pointer.
3217  *	mode:	flags.
3218  *
3219  * Returns:
3220  *	None, request status indicated in cmd->Status.
3221  *
3222  * Context:
3223  *	Kernel context.
3224  */
3225 static void
3226 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3227 {
3228 	uint32_t	bsize = 0;
3229 	uint32_t	boff = 0;
3230 	ql_fcache_t	*fptr;
3231 
3232 	QL_PRINT_9(ha, "started\n");
3233 
3234 	if (ha->fcache == NULL) {
3235 		cmd->Status = EXT_STATUS_ERR;
3236 		EL(ha, "failed, adapter fcache not setup\n");
3237 		return;
3238 	}
3239 
3240 	/* Make sure user passed enough buffer space */
3241 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3242 		bsize += FBUFSIZE;
3243 	}
3244 
3245 	if (cmd->ResponseLen < bsize) {
3246 		if (cmd->ResponseLen != 0) {
3247 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3248 			    bsize, cmd->ResponseLen);
3249 		}
3250 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3251 		cmd->DetailStatus = bsize;
3252 		return;
3253 	}
3254 
3255 	boff = 0;
3256 	fptr = ha->fcache;
3257 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3258 		/* Get the next image */
3259 		if (ddi_copyout(fptr->buf,
3260 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3261 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3262 		    mode) != 0) {
3263 			EL(ha, "failed, ddicopy at %xh, done\n", boff);
3264 			cmd->Status = EXT_STATUS_COPY_ERR;
3265 			cmd->DetailStatus = 0;
3266 			return;
3267 		}
3268 		boff += FBUFSIZE;
3269 		fptr = fptr->next;
3270 	}
3271 
3272 	cmd->Status = EXT_STATUS_OK;
3273 	cmd->DetailStatus = bsize;
3274 
3275 	QL_PRINT_9(ha, "done\n");
3276 }
3277 
3278 /*
3279  * ql_read_flash
3280  *	Get flash contents.
3281  *
3282  * Input:
3283  *	ha:	adapter state pointer.
3284  *	cmd:	EXT_IOCTL cmd struct pointer.
3285  *	mode:	flags.
3286  *
3287  * Returns:
3288  *	None, request status indicated in cmd->Status.
3289  *
3290  * Context:
3291  *	Kernel context.
3292  */
3293 static void
3294 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3295 {
3296 	ql_xioctl_t	*xp = ha->xioctl;
3297 
3298 	QL_PRINT_9(ha, "started\n");
3299 
3300 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1) &&
3301 	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
3302 		EL(ha, "ql_stall_driver failed\n");
3303 		ql_restart_driver(ha);
3304 		cmd->Status = EXT_STATUS_BUSY;
3305 		cmd->DetailStatus = xp->fdesc.flash_size;
3306 		cmd->ResponseLen = 0;
3307 		return;
3308 	}
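	/*
	 * Pre-24xx (FW_TYPE_1) adapters are quiesced around flash access:
	 * the driver is stalled above and an ISP abort is requested below
	 * to resume I/O; later adapters can access the flash without
	 * stalling.
	 */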
3309 
3310 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3311 		cmd->Status = EXT_STATUS_ERR;
3312 		cmd->DetailStatus = xp->fdesc.flash_size;
3313 		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3314 		    cmd->ResponseLen, xp->fdesc.flash_size);
3315 		cmd->ResponseLen = 0;
3316 	} else {
3317 		/* adjust read size to flash size */
3318 		if (cmd->ResponseLen > xp->fdesc.flash_size) {
3319 			EL(ha, "adjusting req=%xh, max=%xh\n",
3320 			    cmd->ResponseLen, xp->fdesc.flash_size);
3321 			cmd->ResponseLen = xp->fdesc.flash_size;
3322 		}
3323 
3324 		/* Get flash data. */
3325 		if (ql_flash_fcode_dump(ha,
3326 		    (void *)(uintptr_t)(cmd->ResponseAdr),
3327 		    (size_t)(cmd->ResponseLen), 0, mode) != 0) {
3328 			cmd->Status = EXT_STATUS_COPY_ERR;
3329 			cmd->ResponseLen = 0;
3330 			EL(ha, "failed,\n");
3331 		}
3332 	}
3333 
3334 	/* Resume I/O */
3335 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
3336 		EL(ha, "isp_abort_needed for restart\n");
3337 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3338 		    DRIVER_STALL);
3339 	}
3340 
3341 	QL_PRINT_9(ha, "done\n");
3342 }
3343 
3344 /*
3345  * ql_write_flash
3346  *	Loads flash contents.
3347  *
3348  * Input:
3349  *	ha:	adapter state pointer.
3350  *	cmd:	EXT_IOCTL cmd struct pointer.
3351  *	mode:	flags.
3352  *
3353  * Returns:
3354  *	None, request status indicated in cmd->Status.
3355  *
3356  * Context:
3357  *	Kernel context.
3358  */
3359 static void
3360 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3361 {
3362 	ql_xioctl_t	*xp = ha->xioctl;
3363 
3364 	QL_PRINT_9(ha, "started\n");
3365 
3366 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1) &&
3367 	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
3368 		EL(ha, "ql_stall_driver failed\n");
3369 		ql_restart_driver(ha);
3370 		cmd->Status = EXT_STATUS_BUSY;
3371 		cmd->DetailStatus = xp->fdesc.flash_size;
3372 		cmd->ResponseLen = 0;
3373 		return;
3374 	}
3375 
3376 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3377 		cmd->Status = EXT_STATUS_ERR;
3378 		cmd->DetailStatus = xp->fdesc.flash_size;
3379 		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
3380 		    cmd->RequestLen, xp->fdesc.flash_size);
3381 		cmd->ResponseLen = 0;
3382 	} else {
3383 		/* Load flash data. */
3384 		if (cmd->RequestLen > xp->fdesc.flash_size) {
3385 			cmd->Status = EXT_STATUS_ERR;
3386 			cmd->DetailStatus = xp->fdesc.flash_size;
3387 			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
3388 			    cmd->RequestLen, xp->fdesc.flash_size);
3389 		} else if (ql_flash_fcode_load(ha,
3390 		    (void *)(uintptr_t)(cmd->RequestAdr),
3391 		    (size_t)(cmd->RequestLen), mode) != 0) {
3392 			cmd->Status = EXT_STATUS_COPY_ERR;
3393 			EL(ha, "failed,\n");
3394 		}
3395 	}
3396 
3397 	/* Resume I/O */
3398 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
3399 		EL(ha, "isp_abort_needed for restart\n");
3400 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3401 		    DRIVER_STALL);
3402 	}
3403 
3404 	QL_PRINT_9(ha, "done\n");
3405 }
3406 
3407 /*
3408  * ql_diagnostic_loopback
3409  *	Performs EXT_CC_LOOPBACK Command
3410  *
3411  * Input:
3412  *	ha:	adapter state pointer.
3413  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3414  *	mode:	flags.
3415  *
3416  * Returns:
3417  *	None, request status indicated in cmd->Status.
3418  *
3419  * Context:
3420  *	Kernel context.
3421  */
3422 static void
3423 ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3424 {
3425 	EXT_LOOPBACK_REQ	plbreq;
3426 	EXT_LOOPBACK_RSP	plbrsp;
3427 	ql_mbx_data_t		mr;
3428 	uint32_t		rval, timer, bpsize;
3429 	caddr_t			bp, pld;
3430 	uint16_t		opt;
3431 	boolean_t		loop_up;
3432 
3433 	QL_PRINT_9(ha, "started\n");
3434 
3435 	/* Get loop back request. */
3436 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
3437 	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
3438 		EL(ha, "failed, ddi_copyin\n");
3439 		cmd->Status = EXT_STATUS_COPY_ERR;
3440 		cmd->ResponseLen = 0;
3441 		return;
3442 	}
3443 
3444 	/* Check transfer length fits in buffer. */
3445 	if (plbreq.BufferLength < plbreq.TransferCount) {
3446 		EL(ha, "failed, BufferLength=%d, xfercnt=%d\n",
3447 		    plbreq.BufferLength,
3448 		    plbreq.TransferCount);
3449 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3450 		cmd->ResponseLen = 0;
3451 		return;
3452 	}
3453 
3454 	/* Allocate command memory. */
3455 	bpsize = plbreq.TransferCount + 4; /* Include opcode size */
3456 	bp = kmem_zalloc(bpsize, KM_SLEEP);
3457 	if (bp == NULL) {
3458 		EL(ha, "failed, kmem_zalloc\n");
3459 		cmd->Status = EXT_STATUS_NO_MEMORY;
3460 		cmd->ResponseLen = 0;
3461 		return;
3462 	}
3463 	pld = bp + 4;
3464 	*bp = 0x10;	/* opcode */
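	/*
	 * bp holds a 4-byte header containing the ELS ECHO opcode (0x10)
	 * followed by the user payload at pld.  The F_port echo path below
	 * may send the whole buffer (header plus payload), falling back to
	 * the payload alone; the other paths send only the payload.
	 */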
3465 
3466 	/* Get loopback data. */
3467 	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
3468 	    pld, plbreq.TransferCount, mode) != plbreq.TransferCount) {
3469 		EL(ha, "failed, ddi_copyin-2\n");
3470 		kmem_free(bp, bpsize);
3471 		cmd->Status = EXT_STATUS_COPY_ERR;
3472 		cmd->ResponseLen = 0;
3473 		return;
3474 	}
3475 
3476 	if (LOOP_RECONFIGURE(ha) ||
3477 	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
3478 		EL(ha, "failed, LOOP_NOT_READY\n");
3479 		ql_restart_driver(ha);
3480 		kmem_free(bp, bpsize);
3481 		cmd->Status = EXT_STATUS_BUSY;
3482 		cmd->ResponseLen = 0;
3483 		return;
3484 	}
3485 	loop_up = ha->task_daemon_flags & LOOP_DOWN ? B_FALSE : B_TRUE;
3486 
3487 	/* Shutdown IP. */
3488 	if (ha->flags & IP_INITIALIZED) {
3489 		(void) ql_shutdown_ip(ha);
3490 	}
3491 
3492 	/* determine topology so we can send the loopback or the echo */
3493 	/* Echo is supported on 2300's only and above */
3494 
3495 	ADAPTER_STATE_LOCK(ha);
3496 	ha->flags |= LOOPBACK_ACTIVE;
3497 	ADAPTER_STATE_UNLOCK(ha);
3498 
3499 	opt = plbreq.Options;
3500 
3501 	if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3502 		opt = (uint16_t)(plbreq.Options & MBC_LOOPBACK_POINT_MASK);
3503 		if (loop_up && opt == MBC_LOOPBACK_POINT_EXTERNAL) {
3504 			if (plbreq.TransferCount > 252) {
3505 				EL(ha, "transfer count (%d) > 252\n",
3506 				    plbreq.TransferCount);
3507 				ql_restart_driver(ha);
3508 				kmem_free(bp, bpsize);
3509 				cmd->Status = EXT_STATUS_INVALID_PARAM;
3510 				cmd->ResponseLen = 0;
3511 				return;
3512 			}
3513 			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3514 			rval = ql_diag_echo(ha, pld, plbreq.TransferCount,
3515 			    MBC_ECHO_ELS, &mr);
3516 		} else {
3517 			if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
3518 				(void) ql_set_loop_point(ha, opt);
3519 			}
3520 			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3521 			rval = ql_diag_loopback(ha, pld, plbreq.TransferCount,
3522 			    opt, plbreq.IterationCount, &mr);
3523 			if (mr.mb[0] == 0x4005 && mr.mb[1] == 0x17) {
3524 				(void) ql_abort_isp(ha);
3525 			}
3526 			if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
3527 				(void) ql_set_loop_point(ha, 0);
3528 			}
3529 		}
3530 	} else {
3531 		if (loop_up && (ha->topology & QL_F_PORT) &&
3532 		    CFG_IST(ha, CFG_LB_ECHO_SUPPORT)) {
3533 			QL_PRINT_9(ha, "F_PORT topology -- using "
3534 			    "echo\n");
3535 			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3536 			if ((rval = ql_diag_echo(ha, bp, bpsize,
3537 			    (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_1) ?
3538 			    MBC_ECHO_64BIT : MBC_ECHO_ELS), &mr)) !=
3539 			    QL_SUCCESS) {
3540 				rval = ql_diag_echo(ha, pld,
3541 				    plbreq.TransferCount,
3542 				    (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_1) ?
3543 				    MBC_ECHO_64BIT : 0), &mr);
3544 			}
3545 		} else {
3546 			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3547 			if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
3548 				opt = (uint16_t)(opt | MBC_LOOPBACK_64BIT);
3549 			}
3550 			rval = ql_diag_loopback(ha, pld, plbreq.TransferCount,
3551 			    opt, plbreq.IterationCount, &mr);
3552 		}
3553 	}
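	/*
	 * To summarize the selection above: FCoE adapters use an ELS echo
	 * for external loopback while the link is up and the loopback
	 * mailbox test otherwise; other adapters use the echo only when
	 * attached to an F_port fabric that supports it, and the loopback
	 * test in all remaining cases.
	 */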
3554 	ADAPTER_STATE_LOCK(ha);
3555 	ha->flags &= ~LOOPBACK_ACTIVE;
3556 	ADAPTER_STATE_UNLOCK(ha);
3557 
3558 	ql_restart_driver(ha);
3559 	if (loop_up && opt == MBC_LOOPBACK_POINT_INTERNAL) {
3560 		timer = 30;
3561 		do {
3562 			delay(100);
3563 		} while (timer-- && LOOP_NOT_READY(ha));
3564 	}
3565 
3566 	/* Restart IP if it was shutdown. */
3567 	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
3568 		(void) ql_initialize_ip(ha);
3569 		ql_isp_rcvbuf(ha);
3570 	}
3571 
3572 	if (rval != QL_SUCCESS) {
3573 		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
3574 		kmem_free(bp, bpsize);
3575 		cmd->Status = EXT_STATUS_MAILBOX;
3576 		cmd->DetailStatus = rval;
3577 		cmd->ResponseLen = 0;
3578 		return;
3579 	}
3580 
3581 	/* Return loopback data. */
3582 	if (ql_send_buffer_data(pld, (caddr_t)(uintptr_t)plbreq.BufferAddress,
3583 	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
3584 		EL(ha, "failed, ddi_copyout\n");
3585 		kmem_free(bp, bpsize);
3586 		cmd->Status = EXT_STATUS_COPY_ERR;
3587 		cmd->ResponseLen = 0;
3588 		return;
3589 	}
3590 	kmem_free(bp, bpsize);
3591 
3592 	/* Return loopback results. */
3593 	plbrsp.BufferAddress = plbreq.BufferAddress;
3594 	plbrsp.BufferLength = plbreq.TransferCount;
3595 	plbrsp.CompletionStatus = mr.mb[0];
3596 
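	/* Echo commands do not return error counters; report zeros. */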
3597 	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
3598 		plbrsp.CrcErrorCount = 0;
3599 		plbrsp.DisparityErrorCount = 0;
3600 		plbrsp.FrameLengthErrorCount = 0;
3601 		plbrsp.IterationCountLastError = 0;
3602 	} else {
3603 		plbrsp.CrcErrorCount = mr.mb[1];
3604 		plbrsp.DisparityErrorCount = mr.mb[2];
3605 		plbrsp.FrameLengthErrorCount = mr.mb[3];
3606 		plbrsp.IterationCountLastError =
3607 		    SHORT_TO_LONG(mr.mb[18], mr.mb[19]);
3608 	}
3609 
3610 	rval = ddi_copyout((void *)&plbrsp,
3611 	    (void *)(uintptr_t)cmd->ResponseAdr,
3612 	    sizeof (EXT_LOOPBACK_RSP), mode);
3613 	if (rval != 0) {
3614 		EL(ha, "failed, ddi_copyout-2\n");
3615 		cmd->Status = EXT_STATUS_COPY_ERR;
3616 		cmd->ResponseLen = 0;
3617 		return;
3618 	}
3619 	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);
3620 
3621 	QL_PRINT_9(ha, "done\n");
3622 }
3623 
3624 /*
3625  * ql_set_loop_point
3626  *	Setup loop point for port configuration.
3627  *
3628  * Input:
3629  *	ha:	adapter state structure.
3630  *	opt:	loop point option.
3631  *
3632  * Returns:
3633  *	ql local function return status code.
3634  *
3635  * Context:
3636  *	Kernel context.
3637  */
3638 int
3639 ql_set_loop_point(ql_adapter_state_t *ha, uint16_t opt)
3640 {
3641 	ql_mbx_data_t	mr;
3642 	int		rval;
3643 	uint32_t	timer;
3644 
3645 	QL_PRINT_9(ha, "started\n");
3646 
3647 	/*
3648 	 * We get the current port config, modify the loopback field and
3649 	 * write it back out.
3650 	 */
3651 	if ((rval = ql_get_port_config(ha, &mr)) != QL_SUCCESS) {
3652 		EL(ha, "get_port_config status=%xh\n", rval);
3653 		return (rval);
3654 	}
3655 	/*
3656 	 * Set the loopback mode field while maintaining the others.
3657 	 */
3658 	mr.mb[1] = (uint16_t)(mr.mb[1] & ~LOOPBACK_MODE_FIELD_MASK);
3659 	if (opt == MBC_LOOPBACK_POINT_INTERNAL) {
3660 		mr.mb[1] = (uint16_t)(mr.mb[1] | LOOPBACK_MODE_INTERNAL);
3661 	} else if (CFG_IST(ha, CFG_CTRL_80XX) &&
3662 	    opt == MBC_LOOPBACK_POINT_EXTERNAL) {
3663 		mr.mb[1] = (uint16_t)(mr.mb[1] | LOOPBACK_MODE_EXTERNAL);
3664 	}
3665 	/*
3666 	 * Changing the port configuration will cause the port state to cycle
3667 	 * down and back up. The indication that this has happened is that
3668 	 * the point to point flag gets set.
3669 	 */
3670 	ADAPTER_STATE_LOCK(ha);
3671 	ha->flags &= ~POINT_TO_POINT;
3672 	ADAPTER_STATE_UNLOCK(ha);
3673 	if ((rval = ql_set_port_config(ha, &mr)) != QL_SUCCESS) {
3674 		EL(ha, "set_port_config status=%xh\n", rval);
3675 	}
3676 
3677 	/* Wait up to 10 seconds for the port state to cycle. */
3678 	for (timer = opt ? 10 : 0; timer; timer--) {
3679 		if (ha->flags & POINT_TO_POINT) {
3680 			break;
3681 		}
3682 		/* Delay for 1000000 usec (1 second). */
3683 		ql_delay(ha, 1000000);
3684 	}
3685 
3686 	QL_PRINT_9(ha, "done\n");
3687 
3688 	return (rval);
3689 }
3690 
3691 /*
3692  * ql_send_els_rnid
3693  *	IOCTL for extended link service RNID command.
3694  *
3695  * Input:
3696  *	ha:	adapter state pointer.
3697  *	cmd:	User space EXT_IOCTL arguments pointer.
3698  *	mode:	flags.
3699  *
3700  * Returns:
3701  *	None, request status indicated in cmd->Status.
3702  *
3703  * Context:
3704  *	Kernel context.
3705  */
3706 static void
3707 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3708 {
3709 	EXT_RNID_REQ	tmp_rnid;
3710 	port_id_t	tmp_fcid;
3711 	caddr_t		tmp_buf, bptr;
3712 	uint32_t	copy_len;
3713 	ql_tgt_t	*tq = NULL;
3714 	EXT_RNID_DATA	rnid_data;
3715 	uint32_t	loop_ready_wait = 10 * 60 * 10;
3716 	int		rval = 0;
3717 	uint32_t	local_hba = 0;
3718 
3719 	QL_PRINT_9(ha, "started\n");
3720 
3721 	if (DRIVER_SUSPENDED(ha)) {
3722 		EL(ha, "failed, LOOP_NOT_READY\n");
3723 		cmd->Status = EXT_STATUS_BUSY;
3724 		cmd->ResponseLen = 0;
3725 		return;
3726 	}
3727 
3728 	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
3729 		/* parameter error */
3730 		EL(ha, "failed, RequestLen != EXT_RNID_REQ, Len=%xh\n",
3731 		    cmd->RequestLen);
3732 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3733 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
3734 		cmd->ResponseLen = 0;
3735 		return;
3736 	}
3737 
3738 	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
3739 	    &tmp_rnid, cmd->RequestLen, mode) != 0) {
3740 		EL(ha, "failed, ddi_copyin\n");
3741 		cmd->Status = EXT_STATUS_COPY_ERR;
3742 		cmd->ResponseLen = 0;
3743 		return;
3744 	}
3745 
3746 	/* Find loop ID of the device */
3747 	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
3748 		bptr = (caddr_t)ha->loginparams.node_ww_name.raw_wwn;
3749 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
3750 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3751 			local_hba = 1;
3752 		} else {
3753 			tq = ql_find_port(ha,
3754 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
3755 		}
3756 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
3757 		bptr = (caddr_t)ha->loginparams.nport_ww_name.raw_wwn;
3758 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
3759 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3760 			local_hba = 1;
3761 		} else {
3762 			tq = ql_find_port(ha,
3763 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
3764 		}
3765 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
3766 		/*
3767 		 * Copy caller's d_id to tmp space.
3768 		 */
3769 		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
3770 		    EXT_DEF_PORTID_SIZE_ACTUAL);
3771 		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
3772 
3773 		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
3774 		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
3775 			local_hba = 1;
3776 		} else {
3777 			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
3778 			    QLNT_PID);
3779 		}
3780 	}
3781 
3782 	/* Allocate memory for command. */
3783 	tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP);
3784 	if (tmp_buf == NULL) {
3785 		EL(ha, "failed, kmem_zalloc\n");
3786 		cmd->Status = EXT_STATUS_NO_MEMORY;
3787 		cmd->ResponseLen = 0;
3788 		return;
3789 	}
3790 
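	/*
	 * If the request addresses this HBA itself, build the response from
	 * the local RNID parameters; otherwise send an RNID ELS to the
	 * remote port.
	 */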
3791 	if (local_hba) {
3792 		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
3793 		if (rval != QL_SUCCESS) {
3794 			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
3795 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3796 			cmd->Status = EXT_STATUS_ERR;
3797 			cmd->ResponseLen = 0;
3798 			return;
3799 		}
3800 
3801 		/* Save the returned RNID data. */
3802 		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));
3803 
3804 		/* Now build the Send RNID response */
3805 		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
3806 		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
3807 		tmp_buf[2] = 0;
3808 		tmp_buf[3] = sizeof (EXT_RNID_DATA);
3809 		bcopy(ha->loginparams.nport_ww_name.raw_wwn, &tmp_buf[4],
3810 		    EXT_DEF_WWN_NAME_SIZE);
3811 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
3812 		    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3813 		    EXT_DEF_WWN_NAME_SIZE);
3814 		bcopy((uint8_t *)&rnid_data,
3815 		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
3816 		    sizeof (EXT_RNID_DATA));
3817 	} else {
3818 		if (tq == NULL) {
3819 			/* no matching device */
3820 			EL(ha, "failed, device not found\n");
3821 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3822 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
3823 			cmd->DetailStatus = EXT_DSTATUS_TARGET;
3824 			cmd->ResponseLen = 0;
3825 			return;
3826 		}
3827 
3828 		/* Send command */
3829 		rval = ql_send_rnid_els(ha, tq->loop_id,
3830 		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
3831 		if (rval != QL_SUCCESS) {
3832 			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3833 			    rval, tq->loop_id);
3834 			while (LOOP_NOT_READY(ha)) {
3835 				ql_delay(ha, 100000);
3836 				if (loop_ready_wait-- == 0) {
3837 					EL(ha, "failed, loop not ready\n");
3838 					cmd->Status = EXT_STATUS_ERR;
3839 					cmd->ResponseLen = 0;
3840 				}
3841 			}
3842 			rval = ql_send_rnid_els(ha, tq->loop_id,
3843 			    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE,
3844 			    tmp_buf);
3845 			if (rval != QL_SUCCESS) {
3846 				/* error */
3847 				EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3848 				    rval, tq->loop_id);
3849 				kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3850 				cmd->Status = EXT_STATUS_ERR;
3851 				cmd->ResponseLen = 0;
3852 				return;
3853 			}
3854 		}
3855 	}
3856 
3857 	/* Copy the response */
3858 	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
3859 	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;
3860 
3861 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
3862 	    copy_len, mode) != copy_len) {
3863 		cmd->Status = EXT_STATUS_COPY_ERR;
3864 		EL(ha, "failed, ddi_copyout\n");
3865 	} else {
3866 		cmd->ResponseLen = copy_len;
3867 		if (copy_len < SEND_RNID_RSP_SIZE) {
3868 			cmd->Status = EXT_STATUS_DATA_OVERRUN;
3869 			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");
3870 
3871 		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
3872 			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
3873 			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
3874 		} else {
3875 			cmd->Status = EXT_STATUS_OK;
3876 			QL_PRINT_9(ha, "done\n");
3878 		}
3879 	}
3880 
3881 	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3882 }
3883 
3884 /*
3885  * ql_set_host_data
3886  *	Process IOCTL subcommand to set host/adapter related data.
3887  *
3888  * Input:
3889  *	ha:	adapter state pointer.
3890  *	cmd:	User space EXT_IOCTL arguments pointer.
3891  *	mode:	flags.
3892  *
3893  * Returns:
3894  *	None, request status indicated in cmd->Status.
3895  *
3896  * Context:
3897  *	Kernel context.
3898  */
3899 static void
3900 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3901 {
3902 	QL_PRINT_9(ha, "started, SubCode=%d\n",
3903 	    cmd->SubCode);
3904 
3905 	/*
3906 	 * Dispatch on the command subcode.
3907 	 */
3908 	switch (cmd->SubCode) {
3909 	case EXT_SC_SET_RNID:
3910 		ql_set_rnid_parameters(ha, cmd, mode);
3911 		break;
3912 	case EXT_SC_RST_STATISTICS:
3913 		(void) ql_reset_statistics(ha, cmd);
3914 		break;
3915 	case EXT_SC_SET_BEACON_STATE:
3916 		ql_set_led_state(ha, cmd, mode);
3917 		break;
3918 	case EXT_SC_SET_PARMS:
3919 	case EXT_SC_SET_BUS_MODE:
3920 	case EXT_SC_SET_DR_DUMP_BUF:
3921 	case EXT_SC_SET_RISC_CODE:
3922 	case EXT_SC_SET_FLASH_RAM:
3923 	case EXT_SC_SET_LUN_BITMASK:
3924 	case EXT_SC_SET_RETRY_CNT:
3925 	case EXT_SC_SET_RTIN:
3926 	case EXT_SC_SET_FC_LUN_BITMASK:
3927 	case EXT_SC_ADD_TARGET_DEVICE:
3928 	case EXT_SC_SWAP_TARGET_DEVICE:
3929 	case EXT_SC_SET_SEL_TIMEOUT:
3930 	default:
3931 		/* function not supported. */
3932 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3933 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3934 		break;
3935 	}
3936 
3937 	if (cmd->Status != EXT_STATUS_OK) {
3938 		EL(ha, "failed, Status=%d\n", cmd->Status);
3939 	} else {
3940 		/*EMPTY*/
3941 		QL_PRINT_9(ha, "done\n");
3942 	}
3943 }
3944 
3945 /*
3946  * ql_get_host_data
3947  *	Performs EXT_CC_GET_DATA subcommands.
3948  *
3949  * Input:
3950  *	ha:	adapter state pointer.
3951  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3952  *	mode:	flags.
3953  *
3954  * Returns:
3955  *	None, request status indicated in cmd->Status.
3956  *
3957  * Context:
3958  *	Kernel context.
3959  */
3960 static void
3961 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3962 {
3963 	int	out_size = 0;
3964 
3965 	QL_PRINT_9(ha, "started, SubCode=%d\n",
3966 	    cmd->SubCode);
3967 
3968 	/* Dispatch on the command subcode. */
3969 	switch (cmd->SubCode) {
3970 	case EXT_SC_GET_STATISTICS:
3971 		out_size = sizeof (EXT_HBA_PORT_STAT);
3972 		break;
3973 	case EXT_SC_GET_FC_STATISTICS:
3974 		out_size = sizeof (EXT_HBA_PORT_STAT);
3975 		break;
3976 	case EXT_SC_GET_PORT_SUMMARY:
3977 		out_size = sizeof (EXT_DEVICEDATA);
3978 		break;
3979 	case EXT_SC_GET_RNID:
3980 		out_size = sizeof (EXT_RNID_DATA);
3981 		break;
3982 	case EXT_SC_GET_TARGET_ID:
3983 		out_size = sizeof (EXT_DEST_ADDR);
3984 		break;
3985 	case EXT_SC_GET_BEACON_STATE:
3986 		out_size = sizeof (EXT_BEACON_CONTROL);
3987 		break;
3988 	case EXT_SC_GET_FC4_STATISTICS:
3989 		out_size = sizeof (EXT_HBA_FC4STATISTICS);
3990 		break;
3991 	case EXT_SC_GET_DCBX_PARAM:
3992 		out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE;
3993 		break;
3994 	case EXT_SC_GET_RESOURCE_CNTS:
3995 		out_size = sizeof (EXT_RESOURCE_CNTS);
3996 		break;
3997 	case EXT_SC_GET_FCF_LIST:
3998 		out_size = sizeof (EXT_FCF_LIST);
3999 		break;
4000 	case EXT_SC_GET_PRIV_STATS:
4001 		out_size = cmd->ResponseLen;
4002 		break;
4003 	case EXT_SC_GET_SCSI_ADDR:
4004 	case EXT_SC_GET_ERR_DETECTIONS:
4005 	case EXT_SC_GET_BUS_MODE:
4006 	case EXT_SC_GET_DR_DUMP_BUF:
4007 	case EXT_SC_GET_RISC_CODE:
4008 	case EXT_SC_GET_FLASH_RAM:
4009 	case EXT_SC_GET_LINK_STATUS:
4010 	case EXT_SC_GET_LOOP_ID:
4011 	case EXT_SC_GET_LUN_BITMASK:
4012 	case EXT_SC_GET_PORT_DATABASE:
4013 	case EXT_SC_GET_PORT_DATABASE_MEM:
4014 	case EXT_SC_GET_POSITION_MAP:
4015 	case EXT_SC_GET_RETRY_CNT:
4016 	case EXT_SC_GET_RTIN:
4017 	case EXT_SC_GET_FC_LUN_BITMASK:
4018 	case EXT_SC_GET_SEL_TIMEOUT:
4019 	default:
4020 		/* function not supported. */
4021 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
4022 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
4023 		cmd->ResponseLen = 0;
4024 		return;
4025 	}
4026 
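	/* Fail if the response buffer is too small for this subcommand. */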
4027 	if (cmd->ResponseLen < out_size) {
4028 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
4029 		cmd->DetailStatus = out_size;
4030 		EL(ha, "failed, ResponseLen=%xh, size=%xh\n",
4031 		    cmd->ResponseLen, out_size);
4032 		cmd->ResponseLen = 0;
4033 		return;
4034 	}
4035 
4036 	switch (cmd->SubCode) {
4037 	case EXT_SC_GET_RNID:
4038 		ql_get_rnid_parameters(ha, cmd, mode);
4039 		break;
4040 	case EXT_SC_GET_STATISTICS:
4041 		ql_get_statistics(ha, cmd, mode);
4042 		break;
4043 	case EXT_SC_GET_FC_STATISTICS:
4044 		ql_get_statistics_fc(ha, cmd, mode);
4045 		break;
4046 	case EXT_SC_GET_FC4_STATISTICS:
4047 		ql_get_statistics_fc4(ha, cmd, mode);
4048 		break;
4049 	case EXT_SC_GET_PORT_SUMMARY:
4050 		ql_get_port_summary(ha, cmd, mode);
4051 		break;
4052 	case EXT_SC_GET_TARGET_ID:
4053 		ql_get_target_id(ha, cmd, mode);
4054 		break;
4055 	case EXT_SC_GET_BEACON_STATE:
4056 		ql_get_led_state(ha, cmd, mode);
4057 		break;
4058 	case EXT_SC_GET_DCBX_PARAM:
4059 		ql_get_dcbx_parameters(ha, cmd, mode);
4060 		break;
4061 	case EXT_SC_GET_FCF_LIST:
4062 		ql_get_fcf_list(ha, cmd, mode);
4063 		break;
4064 	case EXT_SC_GET_RESOURCE_CNTS:
4065 		ql_get_resource_counts(ha, cmd, mode);
4066 		break;
4067 	case EXT_SC_GET_PRIV_STATS:
4068 		ql_get_priv_stats(ha, cmd, mode);
4069 		break;
4070 	}
4071 
4072 	if (cmd->Status != EXT_STATUS_OK) {
4073 		EL(ha, "failed, Status=%d\n", cmd->Status);
4074 	} else {
4075 		/*EMPTY*/
4076 		QL_PRINT_9(ha, "done\n");
4077 	}
4078 }
4079 
4080 /* ******************************************************************** */
4081 /*			Helper Functions				*/
4082 /* ******************************************************************** */
4083 
4084 /*
4085  * ql_lun_count
4086  *	Get the number of LUNs on a target.
4087  *
4088  * Input:
4089  *	ha:	adapter state pointer.
4090  *	tq:	device queue pointer.
4091  *
4092  * Returns:
4093  *	Number of LUNs.
4094  *
4095  * Context:
4096  *	Kernel context.
4097  */
4098 static int
4099 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
4100 {
4101 	int	cnt;
4102 
4103 	QL_PRINT_9(ha, "started\n");
4104 
4105 	/* Try REPORT LUNS first; fall back to an INQUIRY scan. */
4106 	cnt = ql_report_lun(ha, tq);
4107 	if (cnt == 0) {
4108 		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
4109 	}
4110 
4111 	QL_PRINT_9(ha, "done\n");
4112 
4113 	return (cnt);
4114 }
4115 
4116 /*
4117  * ql_report_lun
4118  *	Get the number of LUNs using the REPORT LUNS command.
4119  *
4120  * Input:
4121  *	ha:	adapter state pointer.
4122  *	tq:	target queue pointer.
4123  *
4124  * Returns:
4125  *	Number of LUNs.
4126  *
4127  * Context:
4128  *	Kernel context.
4129  */
4130 static int
4131 ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
4132 {
4133 	int			rval;
4134 	uint8_t			retries;
4135 	ql_mbx_iocb_t		*pkt;
4136 	ql_rpt_lun_lst_t	*rpt;
4137 	dma_mem_t		dma_mem;
4138 	uint32_t		pkt_size, cnt;
4139 	uint16_t		comp_status;
4140 	uint8_t			scsi_status_h, scsi_status_l, *reqs;
4141 
4142 	QL_PRINT_9(ha, "started\n");
4143 
4144 	if (DRIVER_SUSPENDED(ha)) {
4145 		EL(ha, "failed, LOOP_NOT_READY\n");
4146 		return (0);
4147 	}
4148 
4149 	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
4150 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4151 	if (pkt == NULL) {
4152 		EL(ha, "failed, kmem_zalloc\n");
4153 		return (0);
4154 	}
4155 	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4156 
4157 	/* Get DMA memory for the IOCB */
4158 	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
4159 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4160 		cmn_err(CE_WARN, "%s(%d) DMA memory "
4161 		    "alloc failed", QL_NAME, ha->instance);
4162 		kmem_free(pkt, pkt_size);
4163 		return (0);
4164 	}
4165 
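	/* Build and issue a REPORT LUNS IOCB, retrying up to four times. */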
4166 	for (retries = 0; retries < 4; retries++) {
4167 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
4168 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4169 			pkt->cmd24.entry_count = 1;
4170 
4171 			/* Set N_port handle */
4172 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4173 
4174 			/* Set target ID */
4175 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4176 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
4177 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4178 
4179 			/* Set Virtual Port ID */
4180 			pkt->cmd24.vp_index = ha->vp_index;
4181 
4182 			/* Set ISP command timeout. */
4183 			pkt->cmd24.timeout = LE_16(15);
4184 
4185 			/* Load SCSI CDB */
4186 			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
4187 			pkt->cmd24.scsi_cdb[6] =
4188 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4189 			pkt->cmd24.scsi_cdb[7] =
4190 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4191 			pkt->cmd24.scsi_cdb[8] =
4192 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4193 			pkt->cmd24.scsi_cdb[9] =
4194 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4195 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4196 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4197 				    + cnt, 4);
4198 			}
4199 
4200 			/* Set tag queue control flags */
4201 			pkt->cmd24.task = TA_STAG;
4202 
4203 			/* Set transfer direction. */
4204 			pkt->cmd24.control_flags = CF_RD;
4205 
4206 			/* Set data segment count. */
4207 			pkt->cmd24.dseg_count = LE_16(1);
4208 
4209 			/* Load total byte count. */
4210 			/* Load data descriptor. */
4211 			pkt->cmd24.dseg.address[0] = (uint32_t)
4212 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4213 			pkt->cmd24.dseg.address[1] = (uint32_t)
4214 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4215 			pkt->cmd24.total_byte_count =
4216 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4217 			pkt->cmd24.dseg.length =
4218 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4219 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4220 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4221 			pkt->cmd3.entry_count = 1;
4222 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4223 				pkt->cmd3.target_l = LSB(tq->loop_id);
4224 				pkt->cmd3.target_h = MSB(tq->loop_id);
4225 			} else {
4226 				pkt->cmd3.target_h = LSB(tq->loop_id);
4227 			}
4228 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4229 			pkt->cmd3.timeout = LE_16(15);
4230 			pkt->cmd3.dseg_count = LE_16(1);
4231 			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
4232 			pkt->cmd3.scsi_cdb[6] =
4233 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4234 			pkt->cmd3.scsi_cdb[7] =
4235 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4236 			pkt->cmd3.scsi_cdb[8] =
4237 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4238 			pkt->cmd3.scsi_cdb[9] =
4239 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4240 			pkt->cmd3.byte_count =
4241 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4242 			pkt->cmd3.dseg[0].address[0] = (uint32_t)
4243 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4244 			pkt->cmd3.dseg[0].address[1] = (uint32_t)
4245 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4246 			pkt->cmd3.dseg[0].length =
4247 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4248 		} else {
4249 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4250 			pkt->cmd.entry_count = 1;
4251 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4252 				pkt->cmd.target_l = LSB(tq->loop_id);
4253 				pkt->cmd.target_h = MSB(tq->loop_id);
4254 			} else {
4255 				pkt->cmd.target_h = LSB(tq->loop_id);
4256 			}
4257 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4258 			pkt->cmd.timeout = LE_16(15);
4259 			pkt->cmd.dseg_count = LE_16(1);
4260 			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
4261 			pkt->cmd.scsi_cdb[6] =
4262 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4263 			pkt->cmd.scsi_cdb[7] =
4264 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4265 			pkt->cmd.scsi_cdb[8] =
4266 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4267 			pkt->cmd.scsi_cdb[9] =
4268 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4269 			pkt->cmd.byte_count =
4270 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4271 			pkt->cmd.dseg[0].address = (uint32_t)
4272 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4273 			pkt->cmd.dseg[0].length =
4274 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4275 		}
4276 
4277 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4278 		    sizeof (ql_mbx_iocb_t));
4279 
4280 		/* Sync incoming DMA buffer. */
4281 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4282 		    DDI_DMA_SYNC_FORKERNEL);
4283 		/* Copy incoming DMA data. */
4284 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
4285 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4286 
4287 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
4288 			pkt->sts24.entry_status = (uint8_t)
4289 			    (pkt->sts24.entry_status & 0x3c);
4290 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4291 			scsi_status_h = pkt->sts24.scsi_status_h;
4292 			scsi_status_l = pkt->sts24.scsi_status_l;
4293 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4294 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4295 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4296 		} else {
4297 			pkt->sts.entry_status = (uint8_t)
4298 			    (pkt->sts.entry_status & 0x7e);
4299 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4300 			scsi_status_h = pkt->sts.scsi_status_h;
4301 			scsi_status_l = pkt->sts.scsi_status_l;
4302 			reqs = &pkt->sts.req_sense_data[0];
4303 		}
4304 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4305 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4306 			    pkt->sts.entry_status, tq->d_id.b24);
4307 			rval = QL_FUNCTION_PARAMETER_ERROR;
4308 		}
4309 
4310 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4311 		    scsi_status_l & STATUS_CHECK) {
4312 			/* Device underrun, treat as OK. */
4313 			if (rval == QL_SUCCESS &&
4314 			    comp_status == CS_DATA_UNDERRUN &&
4315 			    scsi_status_h & FCP_RESID_UNDER) {
4316 				break;
4317 			}
4318 
4319 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4320 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4321 			    comp_status, scsi_status_h, scsi_status_l);
4322 
4323 			if (rval == QL_SUCCESS) {
4324 				if ((comp_status == CS_TIMEOUT) ||
4325 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4326 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4327 					rval = QL_FUNCTION_TIMEOUT;
4328 					break;
4329 				}
4330 				rval = QL_FUNCTION_FAILED;
4331 			} else if (rval == QL_ABORTED) {
4332 				break;
4333 			}
4334 
4335 			if (scsi_status_l & STATUS_CHECK) {
4336 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4337 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4338 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4339 				    reqs[1], reqs[2], reqs[3], reqs[4],
4340 				    reqs[5], reqs[6], reqs[7], reqs[8],
4341 				    reqs[9], reqs[10], reqs[11], reqs[12],
4342 				    reqs[13], reqs[14], reqs[15], reqs[16],
4343 				    reqs[17]);
4344 			}
4345 		} else {
4346 			break;
4347 		}
4348 		bzero((caddr_t)pkt, pkt_size);
4349 	}
4350 
4351 	if (rval != QL_SUCCESS) {
4352 		EL(ha, "failed=%xh\n", rval);
4353 		rval = 0;
4354 	} else {
4355 		QL_PRINT_9(ha, "LUN list\n");
4356 		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
4357 		rval = (int)(BE_32(rpt->hdr.len) / 8);
4358 	}
4359 
4360 	kmem_free(pkt, pkt_size);
4361 	ql_free_dma_resource(ha, &dma_mem);
4362 
4363 	QL_PRINT_9(ha, "done\n");
4364 
4365 	return (rval);
4366 }
4367 
4368 /*
4369  * ql_inq_scan
4370  *	Get the number of LUNs using the INQUIRY command.
4371  *
4372  * Input:
4373  *	ha:		adapter state pointer.
4374  *	tq:		target queue pointer.
4375  *	count:		maximum number of LUNs to scan for.
4376  *
4377  * Returns:
4378  *	Number of LUNs.
4379  *
4380  * Context:
4381  *	Kernel context.
4382  */
4383 static int
4384 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4385 {
4386 	int		lun, cnt, rval;
4387 	ql_mbx_iocb_t	*pkt;
4388 	uint8_t		*inq;
4389 	uint32_t	pkt_size;
4390 
4391 	QL_PRINT_9(ha, "started\n");
4392 
4393 	pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4394 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4395 	if (pkt == NULL) {
4396 		EL(ha, "failed, kmem_zalloc\n");
4397 		return (0);
4398 	}
4399 	inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4400 
4401 	cnt = 0;
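	/* Probe LUNs with INQUIRY; count supported device types. */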
4402 	for (lun = 0; lun < MAX_LUNS; lun++) {
4403 
4404 		if (DRIVER_SUSPENDED(ha)) {
4405 			rval = QL_LOOP_DOWN;
4406 			cnt = 0;
4407 			break;
4408 		}
4409 
4410 		rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4411 		if (rval == QL_SUCCESS) {
4412 			switch (*inq) {
4413 			case DTYPE_DIRECT:
4414 			case DTYPE_PROCESSOR:	/* Appliance. */
4415 			case DTYPE_WORM:
4416 			case DTYPE_RODIRECT:
4417 			case DTYPE_SCANNER:
4418 			case DTYPE_OPTICAL:
4419 			case DTYPE_CHANGER:
4420 			case DTYPE_ESI:
4421 				cnt++;
4422 				break;
4423 			case DTYPE_SEQUENTIAL:
4424 				cnt++;
4425 				tq->flags |= TQF_TAPE_DEVICE;
4426 				break;
4427 			default:
4428 				QL_PRINT_9(ha, "failed, "
4429 				    "unsupported device id=%xh, lun=%d, "
4430 				    "type=%xh\n", tq->loop_id,
4431 				    lun, *inq);
4432 				break;
4433 			}
4434 
4435 			if (*inq == DTYPE_ESI || cnt >= count) {
4436 				break;
4437 			}
4438 		} else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4439 			cnt = 0;
4440 			break;
4441 		}
4442 	}
4443 
4444 	kmem_free(pkt, pkt_size);
4445 
4446 	QL_PRINT_9(ha, "done\n");
4447 
4448 	return (cnt);
4449 }
4450 
4451 /*
4452  * ql_inq
4453  *	Issue inquiry command.
4454  *
4455  * Input:
4456  *	ha:		adapter state pointer.
4457  *	tq:		target queue pointer.
4458  *	lun:		LUN number.
4459  *	pkt:		command and buffer pointer.
4460  *	inq_len:	amount of inquiry data.
4461  *
4462  * Returns:
4463  *	ql local function return status code.
4464  *
4465  * Context:
4466  *	Kernel context.
4467  */
4468 static int
4469 ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
4470     uint32_t inq_len)
4471 {
4472 	dma_mem_t	dma_mem;
4473 	int		rval, retries;
4474 	uint32_t	pkt_size, cnt;
4475 	uint16_t	comp_status;
4476 	uint8_t		scsi_status_h, scsi_status_l, *reqs;
4477 	caddr_t		inq_data;
4478 	uint64_t	lun_addr;
4479 	fcp_ent_addr_t	*fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
4480 
4481 	QL_PRINT_9(ha, "started\n");
4482 
4483 	if (DRIVER_SUSPENDED(ha)) {
4484 		EL(ha, "failed, loop down\n");
4485 		return (QL_FUNCTION_TIMEOUT);
4486 	}
4487 
4488 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
4489 	bzero((caddr_t)pkt, pkt_size);
4490 
4491 	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
4492 
4493 	/* Get DMA memory for the IOCB */
4494 	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
4495 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4496 		cmn_err(CE_WARN, "%s(%d) DMA memory "
4497 		    "alloc failed", QL_NAME, ha->instance);
4498 		return (0);
4499 	}
4500 
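	/* Build an INQUIRY IOCB for this firmware type; retry up to 4 times. */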
4501 	for (retries = 0; retries < 4; retries++) {
4502 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
4503 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4504 			pkt->cmd24.entry_count = 1;
4505 
4506 			/* Set LUN number */
4507 			lun_addr = ql_get_lun_addr(tq, lun);
4508 			fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
4509 			pkt->cmd24.fcp_lun[2] =
4510 			    lobyte(fcp_ent_addr->ent_addr_0);
4511 			pkt->cmd24.fcp_lun[3] =
4512 			    hibyte(fcp_ent_addr->ent_addr_0);
4513 			pkt->cmd24.fcp_lun[0] =
4514 			    lobyte(fcp_ent_addr->ent_addr_1);
4515 			pkt->cmd24.fcp_lun[1] =
4516 			    hibyte(fcp_ent_addr->ent_addr_1);
4517 			pkt->cmd24.fcp_lun[6] =
4518 			    lobyte(fcp_ent_addr->ent_addr_2);
4519 			pkt->cmd24.fcp_lun[7] =
4520 			    hibyte(fcp_ent_addr->ent_addr_2);
4521 			pkt->cmd24.fcp_lun[4] =
4522 			    lobyte(fcp_ent_addr->ent_addr_3);
4523 			pkt->cmd24.fcp_lun[5] =
4524 			    hibyte(fcp_ent_addr->ent_addr_3);
4525 
4526 			/* Set N_port handle */
4527 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4528 
4529 			/* Set target ID */
4530 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4531 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
4532 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4533 
4534 			/* Set Virtual Port ID */
4535 			pkt->cmd24.vp_index = ha->vp_index;
4536 
4537 			/* Set ISP command timeout. */
4538 			pkt->cmd24.timeout = LE_16(15);
4539 
4540 			/* Load SCSI CDB */
4541 			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
4542 			pkt->cmd24.scsi_cdb[4] = LSB(LSW(inq_len));
4543 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4544 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4545 				    + cnt, 4);
4546 			}
4547 
4548 			/* Set tag queue control flags */
4549 			pkt->cmd24.task = TA_STAG;
4550 
4551 			/* Set transfer direction. */
4552 			pkt->cmd24.control_flags = CF_RD;
4553 
4554 			/* Set data segment count. */
4555 			pkt->cmd24.dseg_count = LE_16(1);
4556 
4557 			/* Load total byte count. */
4558 			pkt->cmd24.total_byte_count = LE_32(inq_len);
4559 
4560 			/* Load data descriptor. */
4561 			pkt->cmd24.dseg.address[0] = (uint32_t)
4562 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4563 			pkt->cmd24.dseg.address[1] = (uint32_t)
4564 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4565 			pkt->cmd24.dseg.length = LE_32(inq_len);
4566 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4567 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4568 			cnt = CMD_TYPE_3_DATA_SEGMENTS;
4569 
4570 			pkt->cmd3.entry_count = 1;
4571 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4572 				pkt->cmd3.target_l = LSB(tq->loop_id);
4573 				pkt->cmd3.target_h = MSB(tq->loop_id);
4574 			} else {
4575 				pkt->cmd3.target_h = LSB(tq->loop_id);
4576 			}
4577 			pkt->cmd3.lun_l = LSB(lun);
4578 			pkt->cmd3.lun_h = MSB(lun);
4579 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4580 			pkt->cmd3.timeout = LE_16(15);
4581 			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
4582 			pkt->cmd3.scsi_cdb[4] = LSB(LSW(inq_len));
4583 			pkt->cmd3.dseg_count = LE_16(1);
4584 			pkt->cmd3.byte_count = LE_32(inq_len);
4585 			pkt->cmd3.dseg[0].address[0] = (uint32_t)
4586 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4587 			pkt->cmd3.dseg[0].address[1] = (uint32_t)
4588 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4589 			pkt->cmd3.dseg[0].length = LE_32(inq_len);
4590 		} else {
4591 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4592 			cnt = CMD_TYPE_2_DATA_SEGMENTS;
4593 
4594 			pkt->cmd.entry_count = 1;
4595 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4596 				pkt->cmd.target_l = LSB(tq->loop_id);
4597 				pkt->cmd.target_h = MSB(tq->loop_id);
4598 			} else {
4599 				pkt->cmd.target_h = LSB(tq->loop_id);
4600 			}
4601 			pkt->cmd.lun_l = LSB(lun);
4602 			pkt->cmd.lun_h = MSB(lun);
4603 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4604 			pkt->cmd.timeout = LE_16(15);
4605 			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
4606 			pkt->cmd.scsi_cdb[4] = LSB(LSW(inq_len));
4607 			pkt->cmd.dseg_count = LE_16(1);
4608 			pkt->cmd.byte_count = LE_32(inq_len);
4609 			pkt->cmd.dseg[0].address = (uint32_t)
4610 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4611 			pkt->cmd.dseg[0].length = LE_32(inq_len);
4612 		}
4613 
4614 /*		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
4615 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4616 		    sizeof (ql_mbx_iocb_t));
4617 
4618 		/* Sync incoming IOCB DMA buffer. */
4619 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4620 		    DDI_DMA_SYNC_FORKERNEL);
4621 		/* Copy incoming DMA data. */
4622 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
4623 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4624 
4625 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
4626 			pkt->sts24.entry_status = (uint8_t)
4627 			    (pkt->sts24.entry_status & 0x3c);
4628 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4629 			scsi_status_h = pkt->sts24.scsi_status_h;
4630 			scsi_status_l = pkt->sts24.scsi_status_l;
4631 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4632 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4633 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4634 		} else {
4635 			pkt->sts.entry_status = (uint8_t)
4636 			    (pkt->sts.entry_status & 0x7e);
4637 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4638 			scsi_status_h = pkt->sts.scsi_status_h;
4639 			scsi_status_l = pkt->sts.scsi_status_l;
4640 			reqs = &pkt->sts.req_sense_data[0];
4641 		}
4642 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4643 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4644 			    pkt->sts.entry_status, tq->d_id.b24);
4645 			rval = QL_FUNCTION_PARAMETER_ERROR;
4646 		}
4647 
4648 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4649 		    scsi_status_l & STATUS_CHECK) {
4650 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4651 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4652 			    comp_status, scsi_status_h, scsi_status_l);
4653 
4654 			if (rval == QL_SUCCESS) {
4655 				if ((comp_status == CS_TIMEOUT) ||
4656 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4657 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4658 					rval = QL_FUNCTION_TIMEOUT;
4659 					break;
4660 				}
4661 				rval = QL_FUNCTION_FAILED;
4662 			}
4663 
4664 			if (scsi_status_l & STATUS_CHECK) {
4665 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4666 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4667 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4668 				    reqs[1], reqs[2], reqs[3], reqs[4],
4669 				    reqs[5], reqs[6], reqs[7], reqs[8],
4670 				    reqs[9], reqs[10], reqs[11], reqs[12],
4671 				    reqs[13], reqs[14], reqs[15], reqs[16],
4672 				    reqs[17]);
4673 			}
4674 		} else {
4675 			break;
4676 		}
4677 	}
4678 	ql_free_dma_resource(ha, &dma_mem);
4679 
4680 	QL_PRINT_9(ha, "done\n");
4681 
4682 	return (rval);
4683 }
4684 
4685 /*
4686  * ql_get_buffer_data
4687  *	Copies data from user space to a kernel buffer.
4688  *
4689  * Input:
4690  *	src:	User source buffer address.
4691  *	dst:	Kernel destination buffer address.
4692  *	size:	Amount of data.
4693  *	mode:	flags.
4694  *
4695  * Returns:
4696  *	Number of bytes transferred.
4697  *
4698  * Context:
4699  *	Kernel context.
4700  */
4701 static uint32_t
4702 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4703 {
4704 	uint32_t	cnt;
4705 
4706 	for (cnt = 0; cnt < size; cnt++) {
4707 		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4708 			QL_PRINT_2(NULL, "failed, ddi_copyin\n");
4709 			break;
4710 		}
4711 	}
4712 
4713 	return (cnt);
4714 }
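/*
 * Illustrative use of ql_get_buffer_data(), modeled on the loopback IOCTL
 * path above. "req", "pld", and "len" are placeholders for this sketch,
 * not actual driver variables:
 *
 *	if (ql_get_buffer_data((caddr_t)(uintptr_t)req.BufferAddress,
 *	    pld, len, mode) != len) {
 *		cmd->Status = EXT_STATUS_COPY_ERR;
 *		cmd->ResponseLen = 0;
 *		return;
 *	}
 */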
4715 
4716 /*
4717  * ql_send_buffer_data
4718  *	Copies data from a kernel buffer to user space.
4719  *
4720  * Input:
4721  *	src:	Kernel source buffer address.
4722  *	dst:	User destination buffer address.
4723  *	size:	Amount of data.
4724  *	mode:	flags.
4725  *
4726  * Returns:
4727  *	Number of bytes transferred.
4728  *
4729  * Context:
4730  *	Kernel context.
4731  */
4732 static uint32_t
4733 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4734 {
4735 	uint32_t	cnt;
4736 
4737 	for (cnt = 0; cnt < size; cnt++) {
4738 		if (ddi_copyout(src++, dst++, 1, mode) != 0) {
4739 			QL_PRINT_2(NULL, "failed, ddi_copyout\n");
4740 			break;
4741 		}
4742 	}
4743 
4744 	return (cnt);
4745 }
4746 
4747 /*
4748  * ql_find_port
4749  *	Locates device queue.
4750  *
4751  * Input:
4752  *	ha:	adapter state pointer.
4753  *	name:	device port name.
4754  *
4755  * Returns:
4756  *	Returns target queue pointer.
4757  *
4758  * Context:
4759  *	Kernel context.
4760  */
4761 static ql_tgt_t *
4762 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type)
4763 {
4764 	ql_link_t	*link;
4765 	ql_tgt_t	*tq;
4766 	uint16_t	index;
4767 
4768 	/* Scan port list for requested target */
4769 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4770 		for (link = ha->dev[index].first; link != NULL;
4771 		    link = link->next) {
4772 			tq = link->base_address;
4773 
4774 			switch (type) {
4775 			case QLNT_LOOP_ID:
4776 				if (bcmp(name, &tq->loop_id,
4777 				    sizeof (uint16_t)) == 0) {
4778 					return (tq);
4779 				}
4780 				break;
4781 			case QLNT_PORT:
4782 				if (bcmp(name, tq->port_name, 8) == 0) {
4783 					return (tq);
4784 				}
4785 				break;
4786 			case QLNT_NODE:
4787 				if (bcmp(name, tq->node_name, 8) == 0) {
4788 					return (tq);
4789 				}
4790 				break;
4791 			case QLNT_PID:
4792 				if (bcmp(name, tq->d_id.r.d_id,
4793 				    sizeof (tq->d_id.r.d_id)) == 0) {
4794 					return (tq);
4795 				}
4796 				break;
4797 			default:
4798 				EL(ha, "failed, invalid type=%d\n", type);
4799 				return (NULL);
4800 			}
4801 		}
4802 	}
4803 
4804 	return (NULL);
4805 }
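/*
 * Illustrative lookup, mirroring the RNID path above. Apart from
 * ql_find_port() and the EXT_* constants, the names are placeholders
 * for this sketch:
 *
 *	ql_tgt_t	*tq;
 *
 *	tq = ql_find_port(ha, (uint8_t *)wwpn, QLNT_PORT);
 *	if (tq == NULL) {
 *		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
 *		cmd->DetailStatus = EXT_DSTATUS_TARGET;
 *	}
 */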
4806 
4807 /*
4808  * ql_24xx_flash_desc
4809  *	Get flash descriptor table.
4810  *
4811  * Input:
4812  *	ha:		adapter state pointer.
4813  *
4814  * Returns:
4815  *	ql local function return status code.
4816  *
4817  * Context:
4818  *	Kernel context.
4819  */
4820 static int
4821 ql_24xx_flash_desc(ql_adapter_state_t *ha)
4822 {
4823 	uint32_t	cnt;
4824 	uint16_t	chksum, *bp, data;
4825 	int		rval;
4826 	flash_desc_t	*fdesc;
4827 	ql_xioctl_t	*xp = ha->xioctl;
4828 
4829 	QL_PRINT_9(ha, "started\n");
4830 
4831 	if (ha->flash_desc_addr == 0) {
4832 		QL_PRINT_9(ha, "desc ptr=0\n");
4833 		return (QL_FUNCTION_FAILED);
4834 	}
4835 
4836 	if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
4837 		EL(ha, "kmem_zalloc=null\n");
4838 		return (QL_MEMORY_ALLOC_FAILED);
4839 	}
4840 	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
4841 	    ha->flash_desc_addr << 2);
4842 	if (rval != QL_SUCCESS) {
4843 		EL(ha, "read status=%xh\n", rval);
4844 		kmem_free(fdesc, sizeof (flash_desc_t));
4845 		return (rval);
4846 	}
4847 
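	/* Verify the descriptor table checksum over its 16-bit words. */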
4848 	chksum = 0;
4849 	bp = (uint16_t *)fdesc;
4850 	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
4851 		data = *bp++;
4852 		LITTLE_ENDIAN_16(&data);
4853 		chksum += data;
4854 	}
4855 
4856 	LITTLE_ENDIAN_32(&fdesc->flash_valid);
4857 	LITTLE_ENDIAN_16(&fdesc->flash_version);
4858 	LITTLE_ENDIAN_16(&fdesc->flash_len);
4859 	LITTLE_ENDIAN_16(&fdesc->flash_checksum);
4860 	LITTLE_ENDIAN_16(&fdesc->flash_manuf);
4861 	LITTLE_ENDIAN_16(&fdesc->flash_id);
4862 	LITTLE_ENDIAN_32(&fdesc->block_size);
4863 	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
4864 	LITTLE_ENDIAN_32(&fdesc->flash_size);
4865 	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
4866 	LITTLE_ENDIAN_32(&fdesc->read_timeout);
4867 
4868 	/* Flash size in the descriptor table is in units of 1024 bytes. */
4869 	fdesc->flash_size = fdesc->flash_size * 0x400;
4870 
4871 	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
4872 	    fdesc->flash_version != FLASH_DESC_VERSION) {
4873 		EL(ha, "invalid descriptor table\n");
4874 		kmem_free(fdesc, sizeof (flash_desc_t));
4875 		return (QL_FUNCTION_FAILED);
4876 	}
4877 
4878 	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
4879 	kmem_free(fdesc, sizeof (flash_desc_t));
4880 
4881 	QL_PRINT_9(ha, "done\n");
4882 
4883 	return (QL_SUCCESS);
4884 }
4885 
4886 /*
4887  * ql_setup_flash
4888  *	Gets the manufacturer and id number of the flash chip, and
4889  *	sets up the size parameter.
4890  *
4891  * Input:
4892  *	ha:	adapter state pointer.
4893  *
4894  * Returns:
4895  *	int:	ql local function return status code.
4896  *
4897  * Context:
4898  *	Kernel context.
4899  */
4900 static int
4901 ql_setup_flash(ql_adapter_state_t *ha)
4902 {
4903 	ql_xioctl_t	*xp = ha->xioctl;
4904 	int		rval = QL_SUCCESS;
4905 
4906 	if (xp->fdesc.flash_size != 0) {
4907 		return (rval);
4908 	}
4909 
4910 	if (CFG_IST(ha, CFG_CTRL_22XX) && !ha->subven_id) {
4911 		return (QL_FUNCTION_FAILED);
4912 	}
4913 
4914 	if (CFG_IST(ha, CFG_CTRL_252780818283)) {
4915 		/*
4916 		 * Temporarily set ha->xioctl->fdesc.flash_size to the
4917 		 * controller's flash size so that ql_dump_fcode does not fail.
4918 		 */
4919 		if (CFG_IST(ha, CFG_CTRL_278083)) {
4920 			ha->xioctl->fdesc.flash_size = 0x1000000;
4921 		} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
4922 			ha->xioctl->fdesc.flash_size = 0x800000;
4923 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
4924 			ha->xioctl->fdesc.flash_size = 0x200000;
4925 		} else {
4926 			ha->xioctl->fdesc.flash_size = 0x400000;
4927 		}
4928 
4929 		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
4930 			EL(ha, "flash desc table ok, exit\n");
4931 			return (rval);
4932 		}
4933 		if (CFG_IST(ha, CFG_CTRL_82XX)) {
4934 			xp->fdesc.flash_manuf = MXIC_FLASH;
4935 			xp->fdesc.flash_id = MXIC_FLASHID_25LXX;
4936 			xp->fdesc.flash_len = 0x17;
4937 		} else {
4938 			(void) ql_24xx_flash_id(ha);
4939 		}
4940 
4941 	} else if (CFG_IST(ha, CFG_CTRL_24XX)) {
4942 		(void) ql_24xx_flash_id(ha);
4943 	} else {
4944 		ql_flash_enable(ha);
4945 
4946 		ql_write_flash_byte(ha, 0x5555, 0xaa);
4947 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
4948 		ql_write_flash_byte(ha, 0x5555, 0x90);
4949 		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);
4950 
4951 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
4952 			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
4953 			ql_write_flash_byte(ha, 0x5555, 0x55);
4954 			ql_write_flash_byte(ha, 0xaaaa, 0x90);
4955 			xp->fdesc.flash_id = (uint16_t)
4956 			    ql_read_flash_byte(ha, 0x0002);
4957 		} else {
4958 			ql_write_flash_byte(ha, 0x5555, 0xaa);
4959 			ql_write_flash_byte(ha, 0x2aaa, 0x55);
4960 			ql_write_flash_byte(ha, 0x5555, 0x90);
4961 			xp->fdesc.flash_id = (uint16_t)
4962 			    ql_read_flash_byte(ha, 0x0001);
4963 		}
4964 
4965 		ql_write_flash_byte(ha, 0x5555, 0xaa);
4966 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
4967 		ql_write_flash_byte(ha, 0x5555, 0xf0);
4968 
4969 		ql_flash_disable(ha);
4970 	}
4971 
4972 	/* Default flash descriptor table. */
4973 	xp->fdesc.write_statusreg_cmd = 1;
4974 	xp->fdesc.write_enable_bits = 0;
4975 	xp->fdesc.unprotect_sector_cmd = 0;
4976 	xp->fdesc.protect_sector_cmd = 0;
4977 	xp->fdesc.write_disable_bits = 0xbc;
4978 	xp->fdesc.block_size = 0x10000;
4979 	xp->fdesc.erase_cmd = 0xd8;
4980 
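	/* Derive flash size and commands from the manufacturer/device IDs. */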
4981 	switch (xp->fdesc.flash_manuf) {
4982 	case AMD_FLASH:
4983 		switch (xp->fdesc.flash_id) {
4984 		case SPAN_FLASHID_16384K:
4985 			if (xp->fdesc.flash_len == 0x18) {
4986 				xp->fdesc.flash_size = 0x1000000;
4987 			} else {
4988 				rval = QL_FUNCTION_FAILED;
4989 			}
4990 			break;
4991 		case SPAN_FLASHID_2048K:
4992 			xp->fdesc.flash_size = 0x200000;
4993 			break;
4994 		case AMD_FLASHID_1024K:
4995 			xp->fdesc.flash_size = 0x100000;
4996 			break;
4997 		case AMD_FLASHID_512K:
4998 		case AMD_FLASHID_512Kt:
4999 		case AMD_FLASHID_512Kb:
5000 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
5001 				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
5002 			} else {
5003 				xp->fdesc.flash_size = 0x80000;
5004 			}
5005 			break;
5006 		case AMD_FLASHID_128K:
5007 			xp->fdesc.flash_size = 0x20000;
5008 			break;
5009 		default:
5010 			rval = QL_FUNCTION_FAILED;
5011 			break;
5012 		}
5013 		break;
5014 	case ST_FLASH:
5015 		switch (xp->fdesc.flash_id) {
5016 		case ST_FLASHID_128K:
5017 			xp->fdesc.flash_size = 0x20000;
5018 			break;
5019 		case ST_FLASHID_512K:
5020 			xp->fdesc.flash_size = 0x80000;
5021 			break;
5022 		case ST_FLASHID_M25PXX:
5023 			if (xp->fdesc.flash_len == 0x14) {
5024 				xp->fdesc.flash_size = 0x100000;
5025 			} else if (xp->fdesc.flash_len == 0x15) {
5026 				xp->fdesc.flash_size = 0x200000;
5027 			} else {
5028 				rval = QL_FUNCTION_FAILED;
5029 			}
5030 			break;
5031 		case ST_FLASHID_N25QXXX:
5032 			if (xp->fdesc.flash_len == 0x18) {
5033 				xp->fdesc.flash_size = 0x1000000;
5034 			} else {
5035 				rval = QL_FUNCTION_FAILED;
5036 			}
5037 			break;
5038 		default:
5039 			rval = QL_FUNCTION_FAILED;
5040 			break;
5041 		}
5042 		break;
5043 	case SST_FLASH:
5044 		switch (xp->fdesc.flash_id) {
5045 		case SST_FLASHID_128K:
5046 			xp->fdesc.flash_size = 0x20000;
5047 			break;
5048 		case SST_FLASHID_1024K_A:
5049 			xp->fdesc.flash_size = 0x100000;
5050 			xp->fdesc.block_size = 0x8000;
5051 			xp->fdesc.erase_cmd = 0x52;
5052 			break;
5053 		case SST_FLASHID_1024K:
5054 		case SST_FLASHID_1024K_B:
5055 			xp->fdesc.flash_size = 0x100000;
5056 			break;
5057 		case SST_FLASHID_2048K:
5058 			xp->fdesc.flash_size = 0x200000;
5059 			break;
5060 		default:
5061 			rval = QL_FUNCTION_FAILED;
5062 			break;
5063 		}
5064 		break;
5065 	case MXIC_FLASH:
5066 		switch (xp->fdesc.flash_id) {
5067 		case MXIC_FLASHID_512K:
5068 			xp->fdesc.flash_size = 0x80000;
5069 			break;
5070 		case MXIC_FLASHID_1024K:
5071 			xp->fdesc.flash_size = 0x100000;
5072 			break;
5073 		case MXIC_FLASHID_25LXX:
5074 			xp->fdesc.write_disable_bits = 0xbc;
5075 			if (xp->fdesc.flash_len == 0x14) {
5076 				xp->fdesc.flash_size = 0x100000;
5077 			} else if (xp->fdesc.flash_len == 0x15) {
5078 				xp->fdesc.flash_size = 0x200000;
5079 			} else if (xp->fdesc.flash_len == 0x16) {
5080 				xp->fdesc.flash_size = 0x400000;
5081 			} else if (xp->fdesc.flash_len == 0x17) {
5082 				xp->fdesc.flash_size = 0x800000;
5083 			} else if (xp->fdesc.flash_len == 0x18) {
5084 				xp->fdesc.flash_size = 0x1000000;
5085 			} else {
5086 				rval = QL_FUNCTION_FAILED;
5087 			}
5088 			break;
5089 		default:
5090 			rval = QL_FUNCTION_FAILED;
5091 			break;
5092 		}
5093 		break;
5094 	case ATMEL_FLASH:
5095 		switch (xp->fdesc.flash_id) {
5096 		case ATMEL_FLASHID_1024K:
5097 			xp->fdesc.flash_size = 0x100000;
5098 			xp->fdesc.write_disable_bits = 0xbc;
5099 			xp->fdesc.unprotect_sector_cmd = 0x39;
5100 			xp->fdesc.protect_sector_cmd = 0x36;
5101 			break;
5102 		default:
5103 			rval = QL_FUNCTION_FAILED;
5104 			break;
5105 		}
5106 		break;
5107 	case WINBOND_FLASH:
5108 		switch (xp->fdesc.flash_id) {
5109 		case WINBOND_FLASHID:
5110 			if (xp->fdesc.flash_len == 0x15) {
5111 				xp->fdesc.flash_size = 0x200000;
5112 			} else if (xp->fdesc.flash_len == 0x16) {
5113 				xp->fdesc.flash_size = 0x400000;
5114 			} else if (xp->fdesc.flash_len == 0x17) {
5115 				xp->fdesc.flash_size = 0x800000;
5116 			} else if (xp->fdesc.flash_len == 0x18) {
5117 				xp->fdesc.flash_size = 0x1000000;
5118 			} else {
5119 				rval = QL_FUNCTION_FAILED;
5120 			}
5121 			break;
5122 		default:
5123 			rval = QL_FUNCTION_FAILED;
5124 			break;
5125 		}
5126 		break;
5127 	case INTEL_FLASH:
5128 		switch (xp->fdesc.flash_id) {
5129 		case INTEL_FLASHID:
5130 			if (xp->fdesc.flash_len == 0x11) {
5131 				xp->fdesc.flash_size = 0x200000;
5132 			} else if (xp->fdesc.flash_len == 0x12) {
5133 				xp->fdesc.flash_size = 0x400000;
5134 			} else if (xp->fdesc.flash_len == 0x13) {
5135 				xp->fdesc.flash_size = 0x800000;
5136 			} else {
5137 				rval = QL_FUNCTION_FAILED;
5138 			}
5139 			break;
5140 		default:
5141 			rval = QL_FUNCTION_FAILED;
5142 			break;
5143 		}
5144 		break;
5145 	case EON_FLASH:
5146 		switch (xp->fdesc.flash_id) {
5147 		case EON_FLASHID_EN25QXXX:
5148 			if (xp->fdesc.flash_len == 0x18) {
5149 				xp->fdesc.flash_size = 0x1000000;
5150 			} else {
5151 				rval = QL_FUNCTION_FAILED;
5152 			}
5153 			break;
5154 		default:
5155 			rval = QL_FUNCTION_FAILED;
5156 			break;
5157 		}
5158 		break;
5159 	default:
5160 		rval = QL_FUNCTION_FAILED;
5161 		break;
5162 	}
5163 
5164 	/* Try flash table later. */
5165 	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
5166 		EL(ha, "no default id\n");
5167 		return (QL_SUCCESS);
5168 	}
5169 
5170 	/*
5171 	 * Hack for non-standard 2312/2322 and 6312/6322 boards. The hardware
5172 	 * must use either the original 128k flash chip or something larger.
5173 	 * For driver purposes, we treat it as a 128k flash chip.
5174 	 */
5175 	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
5176 	    ha->device_id == 0x2322 || ha->device_id == 0x6322) &&
5177 	    (xp->fdesc.flash_size > 0x20000) &&
5178 	    (CFG_IST(ha, CFG_SBUS_CARD) == 0)) {
5179 		EL(ha, "chip exceeds max size: %xh, using 128k\n",
5180 		    xp->fdesc.flash_size);
5181 		xp->fdesc.flash_size = 0x20000;
5182 	}
5183 
5184 	if (rval == QL_SUCCESS) {
5185 		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
5186 		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
5187 		    xp->fdesc.flash_size);
5188 	} else {
5189 		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
5190 		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
5191 	}
5192 
5193 	return (rval);
5194 }
5195 
5196 /*
5197  * ql_flash_fcode_load
5198  *	Loads fcode data into flash from application.
5199  *
5200  * Input:
5201  *	ha:	adapter state pointer.
5202  *	bp:	user buffer address.
5203  *	bsize:	user buffer size.
5204  *	mode:	flags.
5205  *
5206  * Returns:
5207  *	0 = success, else errno value.
5208  * Context:
5209  *	Kernel context.
5210  */
5211 static int
5212 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5213     int mode)
5214 {
5215 	uint8_t		*bfp;
5216 	ql_xioctl_t	*xp = ha->xioctl;
5217 	int		rval = 0;
5218 
5219 	QL_PRINT_9(ha, "started\n");
5220 
5221 	if (bsize > xp->fdesc.flash_size) {
5222 		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
5223 		    xp->fdesc.flash_size);
5224 		return (ENOMEM);
5225 	}
5226 
5227 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5228 		EL(ha, "failed, kmem_zalloc\n");
5229 		rval = ENOMEM;
5230 	} else {
5231 		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
5232 			EL(ha, "failed, ddi_copyin\n");
5233 			rval = EFAULT;
5234 		} else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
5235 			EL(ha, "failed, load_fcode\n");
5236 			rval = EFAULT;
5237 		} else {
5238 			/* Reset caches on all adapter instances. */
5239 			ql_update_flash_caches(ha);
5240 			rval = 0;
5241 		}
5242 		kmem_free(bfp, bsize);
5243 	}
5244 
5245 	QL_PRINT_9(ha, "done\n");
5246 
5247 	return (rval);
5248 }
5249 
5250 /*
5251  * ql_load_fcode
5252  *	Loads fcode in to flash.
5253  *
5254  * Input:
5255  *	ha:	adapter state pointer.
5256  *	dp:	data pointer.
5257  *	size:	data length.
5258  *	addr:	flash byte address.
5259  *
5260  * Returns:
5261  *	ql local function return status code.
5262  *
5263  * Context:
5264  *	Kernel context.
5265  */
5266 int
5267 ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
5268 {
5269 	uint32_t	cnt;
5270 	int		rval;
5271 
5272 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
5273 		return (ql_24xx_load_flash(ha, dp, size, addr));
5274 	}
5275 
5276 	QL_PRINT_9(ha, "started\n");
5277 
5278 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5279 		/*
5280 		 * SBus cards get an additional check to make
5281 		 * sure we don't brick the HBA.
5282 		 */
5283 		if (dp[0] != 0xf1) {
5284 			EL(ha, "failed, incorrect fcode for sbus\n");
5285 			return (QL_FUNCTION_PARAMETER_ERROR);
5286 		}
5287 	}
5288 
5289 	GLOBAL_HW_LOCK();
5290 
5291 	/* Enable Flash Read/Write. */
5292 	ql_flash_enable(ha);
5293 
5294 	/* Erase flash prior to write. */
5295 	rval = ql_erase_flash(ha, 0);
5296 
5297 	if (rval == QL_SUCCESS) {
5298 		/* Write fcode data to flash. */
5299 		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
5300 			/* Allow other system activity. */
5301 			if (cnt % 0x1000 == 0) {
5302 				drv_usecwait(1);
5303 			}
5304 			rval = ql_program_flash_address(ha, addr++, *dp++);
5305 			if (rval != QL_SUCCESS)
5306 				break;
5307 		}
5308 	}
5309 
5310 	ql_flash_disable(ha);
5311 
5312 	GLOBAL_HW_UNLOCK();
5313 
5314 	if (rval != QL_SUCCESS) {
5315 		EL(ha, "failed, rval=%xh\n", rval);
5316 	} else {
5317 		/*EMPTY*/
5318 		QL_PRINT_9(ha, "done\n");
5319 	}
5320 	return (rval);
5321 }
5322 
5323 /*
5324  * ql_flash_fcode_dump
5325  *	Dumps FLASH to application.
5326  *
5327  * Input:
5328  *	ha:	adapter state pointer.
5329  *	bp:	user buffer address.
5330  *	bsize:	user buffer size.
5331  *	faddr:	flash byte address.
5332  *	mode:	flags.
5333  *
5334  * Returns:
5335  *	0 = success, else errno value.
5336  * Context:
5337  *	Kernel context.
5338  */
5339 static int
5340 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5341     uint32_t faddr, int mode)
5342 {
5343 	uint8_t		*bfp;
5344 	int		rval;
5345 	ql_xioctl_t	*xp = ha->xioctl;
5346 
5347 	QL_PRINT_9(ha, "started\n");
5348 
5349 	/* adjust max read size to flash size */
5350 	if (bsize > xp->fdesc.flash_size) {
5351 		EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5352 		    xp->fdesc.flash_size);
5353 		bsize = xp->fdesc.flash_size;
5354 	}
5355 
5356 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5357 		EL(ha, "failed, kmem_zalloc\n");
5358 		rval = ENOMEM;
5359 	} else {
5360 		/* Dump Flash fcode. */
5361 		rval = ql_dump_fcode(ha, bfp, bsize, faddr);
5362 
5363 		if (rval != QL_SUCCESS) {
5364 			EL(ha, "failed, dump_fcode = %x\n", rval);
5365 			rval = EFAULT;
5366 		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5367 			EL(ha, "failed, ddi_copyout\n");
5368 			rval = EFAULT;
5369 		} else {
5370 			rval = 0;
5371 		}
5372 		kmem_free(bfp, bsize);
5373 	}
5374 
5375 	QL_PRINT_9(ha, "done\n");
5376 
5377 	return (rval);
5378 }
5379 
5380 /*
5381  * ql_dump_fcode
5382  *	Dumps fcode from flash.
5383  *
5384  * Input:
5385  *	ha:		adapter state pointer.
5386  *	dp:		data pointer.
5387  *	size:		data length in bytes.
5388  *	startpos:	starting position in flash (byte address).
5389  *
5390  * Returns:
5391  *	ql local function return status code.
5392  *
5393  * Context:
5394  *	Kernel context.
5395  *
5396  */
5397 int
5398 ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
5399     uint32_t startpos)
5400 {
5401 	uint32_t	cnt, data, addr;
5402 	uint8_t		bp[4], *src;
5403 	int		fp_rval, rval = QL_SUCCESS;
5404 	dma_mem_t	mem;
5405 
5406 	QL_PRINT_9(ha, "started\n");
5407 
5408 	/* make sure startpos+size doesn't exceed flash */
5409 	if (size + startpos > ha->xioctl->fdesc.flash_size) {
5410 		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
5411 		    size, startpos, ha->xioctl->fdesc.flash_size);
5412 		return (QL_FUNCTION_PARAMETER_ERROR);
5413 	}
5414 
5415 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
5416 		/* check start addr is 32 bit aligned for 24xx */
5417 		if ((startpos & 0x3) != 0) {
5418 			rval = ql_24xx_read_flash(ha,
5419 			    ha->flash_data_addr | startpos >> 2, &data);
5420 			if (rval != QL_SUCCESS) {
5421 				EL(ha, "failed2, rval = %xh\n", rval);
5422 				return (rval);
5423 			}
5424 			bp[0] = LSB(LSW(data));
5425 			bp[1] = MSB(LSW(data));
5426 			bp[2] = LSB(MSW(data));
5427 			bp[3] = MSB(MSW(data));
5428 			while (size && startpos & 0x3) {
5429 				*dp++ = bp[startpos & 0x3];
5430 				startpos++;
5431 				size--;
5432 			}
5433 			if (size == 0) {
5434 				QL_PRINT_9(ha, "done2\n");
5436 				return (rval);
5437 			}
5438 		}
5439 
5440 		/* adjust 24xx start addr for 32 bit words */
5441 		addr = startpos / 4 | ha->flash_data_addr;
5442 	}
5443 
5444 	bzero(&mem, sizeof (dma_mem_t));
5445 	/* Check whether fast (DMA) flash read is supported. */
5446 	if ((ha->pha->task_daemon_flags & FIRMWARE_UP) &&
5447 	    (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT))) {
5448 		fp_rval = QL_SUCCESS;
5449 		/* Setup DMA buffer. */
5450 		rval = ql_get_dma_mem(ha, &mem, size,
5451 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN);
5452 		if (rval != QL_SUCCESS) {
5453 			EL(ha, "failed, ql_get_dma_mem=%xh\n",
5454 			    rval);
5455 			return (ENOMEM);
5456 		}
5457 	} else {
5458 		fp_rval = QL_NOT_SUPPORTED;
5459 	}
5460 
5461 	GLOBAL_HW_LOCK();
5462 
5463 	/* Enable Flash Read/Write. */
5464 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
5465 		ql_flash_enable(ha);
5466 	}
5467 
5468 	/* Read fcode data from flash. */
5469 	while (size) {
5470 		/* Allow other system activity. */
5471 		if (size % 0x1000 == 0) {
5472 			ql_delay(ha, 10000);
5473 		}
5474 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
5475 			if (fp_rval == QL_SUCCESS && (addr & 0x3f) == 0) {
5476 				cnt = (size + 3) >> 2;
5477 				fp_rval = ql_rd_risc_ram(ha, addr,
5478 				    mem.cookie.dmac_laddress, cnt);
5479 				if (fp_rval == QL_SUCCESS) {
5480 					for (src = mem.bp; size; size--) {
5481 						*dp++ = *src++;
5482 					}
5483 					addr += cnt;
5484 					continue;
5485 				}
5486 			}
5487 			rval = ql_24xx_read_flash(ha, addr++,
5488 			    &data);
5489 			if (rval != QL_SUCCESS) {
5490 				break;
5491 			}
5492 			bp[0] = LSB(LSW(data));
5493 			bp[1] = MSB(LSW(data));
5494 			bp[2] = LSB(MSW(data));
5495 			bp[3] = MSB(MSW(data));
5496 			for (cnt = 0; size && cnt < 4; size--) {
5497 				*dp++ = bp[cnt++];
5498 			}
5499 		} else {
5500 			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
5501 			size--;
5502 		}
5503 	}
5504 
5505 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
5506 		ql_flash_disable(ha);
5507 	}
5508 
5509 	GLOBAL_HW_UNLOCK();
5510 
5511 	if (mem.dma_handle != NULL) {
5512 		ql_free_dma_resource(ha, &mem);
5513 	}
5514 
5515 	if (rval != QL_SUCCESS) {
5516 		EL(ha, "failed, rval = %xh\n", rval);
5517 	} else {
5518 		/*EMPTY*/
5519 		QL_PRINT_9(ha, "done\n");
5520 	}
5521 	return (rval);
5522 }
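
/*
 * A minimal usage sketch for ql_dump_fcode(): read an arbitrary flash
 * region into a kernel buffer.  The helper name and the caller-chosen
 * offset/length are illustrative assumptions, not part of the driver.
 */
static int
ql_example_read_flash_region(ql_adapter_state_t *ha, uint32_t faddr,
    uint32_t len)
{
	uint8_t	*bp;
	int	rval;

	bp = kmem_zalloc(len, KM_SLEEP);

	/* Byte-addressed read; unaligned 24xx starts are handled inside. */
	rval = ql_dump_fcode(ha, bp, len, faddr);
	if (rval != QL_SUCCESS) {
		EL(ha, "dump_fcode failed, rval=%xh\n", rval);
	}

	kmem_free(bp, len);
	return (rval);
}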
5523 
5524 /*
5525  * ql_program_flash_address
5526  *	Program flash address.
5527  *
5528  * Input:
5529  *	ha:	adapter state pointer.
5530  *	addr:	flash byte address.
5531  *	data:	data to be written to flash.
5532  *
5533  * Returns:
5534  *	ql local function return status code.
5535  *
5536  * Context:
5537  *	Kernel context.
5538  */
5539 static int
5540 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
5541     uint8_t data)
5542 {
5543 	int	rval;
5544 
5545 	/* Write Program Command Sequence */
5546 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5547 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5548 		ql_write_flash_byte(ha, addr, data);
5549 	} else {
5550 		ql_write_flash_byte(ha, 0x5555, 0xaa);
5551 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
5552 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5553 		ql_write_flash_byte(ha, addr, data);
5554 	}
5555 
5556 	/* Wait for write to complete. */
5557 	rval = ql_poll_flash(ha, addr, data);
5558 
5559 	if (rval != QL_SUCCESS) {
5560 		EL(ha, "failed, rval=%xh\n", rval);
5561 	}
5562 	return (rval);
5563 }
5564 
5565 /*
5566  * ql_set_rnid_parameters
5567  *	Set RNID parameters.
5568  *
5569  * Input:
5570  *	ha:	adapter state pointer.
5571  *	cmd:	User space CT arguments pointer.
5572  *	mode:	flags.
5573  */
5574 static void
5575 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5576 {
5577 	EXT_SET_RNID_REQ	tmp_set;
5578 	EXT_RNID_DATA		*tmp_buf;
5579 	int			rval = 0;
5580 
5581 	QL_PRINT_9(ha, "started\n");
5582 
5583 	if (DRIVER_SUSPENDED(ha)) {
5584 		EL(ha, "failed, LOOP_NOT_READY\n");
5585 		cmd->Status = EXT_STATUS_BUSY;
5586 		cmd->ResponseLen = 0;
5587 		return;
5588 	}
5589 
5590 	cmd->ResponseLen = 0; /* NO response to caller. */
5591 	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5592 		/* parameter error */
5593 		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5594 		    cmd->RequestLen);
5595 		cmd->Status = EXT_STATUS_INVALID_PARAM;
5596 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5597 		cmd->ResponseLen = 0;
5598 		return;
5599 	}
5600 
5601 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
5602 	    cmd->RequestLen, mode);
5603 	if (rval != 0) {
5604 		EL(ha, "failed, ddi_copyin\n");
5605 		cmd->Status = EXT_STATUS_COPY_ERR;
5606 		cmd->ResponseLen = 0;
5607 		return;
5608 	}
5609 
5610 	/* Allocate memory for command. */
5611 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5612 	if (tmp_buf == NULL) {
5613 		EL(ha, "failed, kmem_zalloc\n");
5614 		cmd->Status = EXT_STATUS_NO_MEMORY;
5615 		cmd->ResponseLen = 0;
5616 		return;
5617 	}
5618 
5619 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5620 	    (caddr_t)tmp_buf);
5621 	if (rval != QL_SUCCESS) {
5622 		/* error */
5623 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5624 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5625 		cmd->Status = EXT_STATUS_ERR;
5626 		cmd->ResponseLen = 0;
5627 		return;
5628 	}
5629 
5630 	/* Now set the requested params. */
5631 	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5632 	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5633 	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5634 
5635 	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5636 	    (caddr_t)tmp_buf);
5637 	if (rval != QL_SUCCESS) {
5638 		/* error */
5639 		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5640 		cmd->Status = EXT_STATUS_ERR;
5641 		cmd->ResponseLen = 0;
5642 	}
5643 
5644 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5645 
5646 	QL_PRINT_9(ha, "done\n");
5647 }
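
/*
 * The routine above is a read-modify-write exchange: fetch the current
 * RNID data block, overlay the caller-supplied fields, and write the
 * merged block back.  A condensed sketch (hypothetical helper, KM_SLEEP
 * allocation) is shown below for illustration.
 */
static int
ql_example_update_rnid(ql_adapter_state_t *ha, EXT_SET_RNID_REQ *req)
{
	EXT_RNID_DATA	*rd;
	int		rval;

	rd = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);

	/* Read the current parameters from the adapter. */
	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA), (caddr_t)rd);
	if (rval == QL_SUCCESS) {
		/* Overlay only the fields the caller supplies. */
		bcopy(req->IPVersion, rd->IPVersion, 2);
		bcopy(req->UDPPortNumber, rd->UDPPortNumber, 2);
		bcopy(req->IPAddress, rd->IPAddress, 16);

		/* Write the merged block back. */
		rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
		    (caddr_t)rd);
	}

	kmem_free(rd, sizeof (EXT_RNID_DATA));
	return (rval);
}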
5648 
5649 /*
5650  * ql_get_rnid_parameters
5651  *	Get RNID parameters.
5652  *
5653  * Input:
5654  *	ha:	adapter state pointer.
5655  *	cmd:	User space CT arguments pointer.
5656  *	mode:	flags.
5657  */
5658 static void
5659 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5660 {
5661 	EXT_RNID_DATA	*tmp_buf;
5662 	uint32_t	rval;
5663 
5664 	QL_PRINT_9(ha, "started\n");
5665 
5666 	if (DRIVER_SUSPENDED(ha)) {
5667 		EL(ha, "failed, LOOP_NOT_READY\n");
5668 		cmd->Status = EXT_STATUS_BUSY;
5669 		cmd->ResponseLen = 0;
5670 		return;
5671 	}
5672 
5673 	/* Allocate memory for command. */
5674 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5675 	if (tmp_buf == NULL) {
5676 		EL(ha, "failed, kmem_zalloc\n");
5677 		cmd->Status = EXT_STATUS_NO_MEMORY;
5678 		cmd->ResponseLen = 0;
5679 		return;
5680 	}
5681 
5682 	/* Send command */
5683 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5684 	    (caddr_t)tmp_buf);
5685 	if (rval != QL_SUCCESS) {
5686 		/* error */
5687 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5688 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5689 		cmd->Status = EXT_STATUS_ERR;
5690 		cmd->ResponseLen = 0;
5691 		return;
5692 	}
5693 
5694 	/* Copy the response */
5695 	if (ql_send_buffer_data((caddr_t)tmp_buf,
5696 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
5697 	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5698 		EL(ha, "failed, ddi_copyout\n");
5699 		cmd->Status = EXT_STATUS_COPY_ERR;
5700 		cmd->ResponseLen = 0;
5701 	} else {
5702 		QL_PRINT_9(ha, "done\n");
5703 		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5704 	}
5705 
5706 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5707 }
5708 
5709 /*
5710  * ql_reset_statistics
5711  *	Performs EXT_SC_RST_STATISTICS subcommand of EXT_CC_SET_DATA.
5712  *
5713  * Input:
5714  *	ha:	adapter state pointer.
5715  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5716  *
5717  * Returns:
5718  *	ql local function return status code; request status in cmd->Status.
5719  *
5720  * Context:
5721  *	Kernel context.
5722  */
5723 static int
5724 ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
5725 {
5726 	ql_xioctl_t		*xp = ha->xioctl;
5727 	int			rval = 0;
5728 
5729 	QL_PRINT_9(ha, "started\n");
5730 
5731 	if (DRIVER_SUSPENDED(ha)) {
5732 		EL(ha, "failed, LOOP_NOT_READY\n");
5733 		cmd->Status = EXT_STATUS_BUSY;
5734 		cmd->ResponseLen = 0;
5735 		return (QL_FUNCTION_SUSPENDED);
5736 	}
5737 
5738 	rval = ql_reset_link_status(ha);
5739 	if (rval != QL_SUCCESS) {
5740 		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
5741 		cmd->Status = EXT_STATUS_MAILBOX;
5742 		cmd->DetailStatus = rval;
5743 		cmd->ResponseLen = 0;
5744 	}
5745 
5746 	TASK_DAEMON_LOCK(ha);
5747 	xp->IosRequested = 0;
5748 	xp->BytesRequested = 0;
5749 	xp->IOInputRequests = 0;
5750 	xp->IOOutputRequests = 0;
5751 	xp->IOControlRequests = 0;
5752 	xp->IOInputMByteCnt = 0;
5753 	xp->IOOutputMByteCnt = 0;
5754 	xp->IOOutputByteCnt = 0;
5755 	xp->IOInputByteCnt = 0;
5756 	TASK_DAEMON_UNLOCK(ha);
5757 
5758 	INTR_LOCK(ha);
5759 	xp->ControllerErrorCount = 0;
5760 	xp->DeviceErrorCount = 0;
5761 	xp->TotalLipResets = 0;
5762 	xp->TotalInterrupts = 0;
5763 	INTR_UNLOCK(ha);
5764 
5765 	QL_PRINT_9(ha, "done\n");
5766 
5767 	return (rval);
5768 }
5769 
5770 /*
5771  * ql_get_statistics
5772  *	Performs EXT_SC_GET_STATISTICS subcommand of EXT_CC_GET_DATA.
5773  *
5774  * Input:
5775  *	ha:	adapter state pointer.
5776  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5777  *	mode:	flags.
5778  *
5779  * Returns:
5780  *	None, request status indicated in cmd->Status.
5781  *
5782  * Context:
5783  *	Kernel context.
5784  */
5785 static void
5786 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5787 {
5788 	EXT_HBA_PORT_STAT	ps = {0};
5789 	ql_link_stats_t		*ls;
5790 	int			rval;
5791 	ql_xioctl_t		*xp = ha->xioctl;
5792 	int			retry = 10;
5793 
5794 	QL_PRINT_9(ha, "started\n");
5795 
5796 	while (ha->task_daemon_flags &
5797 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5798 		ql_delay(ha, 10000000);	/* 10 second delay */
5799 
5800 		retry--;
5801 
5802 		if (retry == 0) { /* effectively 100 seconds */
5803 			EL(ha, "failed, LOOP_NOT_READY\n");
5804 			cmd->Status = EXT_STATUS_BUSY;
5805 			cmd->ResponseLen = 0;
5806 			return;
5807 		}
5808 	}
5809 
5810 	/* Allocate memory for command. */
5811 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5812 	if (ls == NULL) {
5813 		EL(ha, "failed, kmem_zalloc\n");
5814 		cmd->Status = EXT_STATUS_NO_MEMORY;
5815 		cmd->ResponseLen = 0;
5816 		return;
5817 	}
5818 
5819 	/*
5820 	 * These appear to be port statistics; the loop ID or port ID
5821 	 * should be in cmd->Instance.
5822 	 */
5823 	rval = ql_get_status_counts(ha, (uint16_t)
5824 	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
5825 	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
5826 	if (rval != QL_SUCCESS) {
5827 		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
5828 		    ha->loop_id);
5829 		cmd->Status = EXT_STATUS_MAILBOX;
5830 		cmd->DetailStatus = rval;
5831 		cmd->ResponseLen = 0;
5832 	} else {
5833 		ps.ControllerErrorCount = xp->ControllerErrorCount;
5834 		ps.DeviceErrorCount = xp->DeviceErrorCount;
5835 		ps.IoCount = (uint32_t)(xp->IOInputRequests +
5836 		    xp->IOOutputRequests + xp->IOControlRequests);
5837 		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
5838 		    xp->IOOutputMByteCnt);
5839 		ps.LipResetCount = xp->TotalLipResets;
5840 		ps.InterruptCount = xp->TotalInterrupts;
5841 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5842 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5843 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5844 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5845 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5846 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5847 
5848 		rval = ddi_copyout((void *)&ps,
5849 		    (void *)(uintptr_t)cmd->ResponseAdr,
5850 		    sizeof (EXT_HBA_PORT_STAT), mode);
5851 		if (rval != 0) {
5852 			EL(ha, "failed, ddi_copyout\n");
5853 			cmd->Status = EXT_STATUS_COPY_ERR;
5854 			cmd->ResponseLen = 0;
5855 		} else {
5856 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5857 		}
5858 	}
5859 
5860 	kmem_free(ls, sizeof (ql_link_stats_t));
5861 
5862 	QL_PRINT_9(ha, "done\n");
5863 }
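
/*
 * The firmware returns the link error counters little-endian; LE_32()
 * converts each one to host order before it is reported.  The same
 * mapping is repeated in ql_get_statistics_fc() below; a condensed
 * sketch (hypothetical helper) of that shared conversion follows.
 */
static void
ql_example_convert_link_stats(ql_link_stats_t *ls, EXT_HBA_PORT_STAT *ps)
{
	ps->LinkFailureCount = LE_32(ls->link_fail_cnt);
	ps->LossOfSyncCount = LE_32(ls->sync_loss_cnt);
	ps->LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
	ps->PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
	ps->InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
	ps->InvalidCRCCount = LE_32(ls->inv_crc_cnt);
}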
5864 
5865 /*
5866  * ql_get_statistics_fc
5867  *	Performs EXT_SC_GET_FC_STATISTICS subcommand of EXT_CC_GET_DATA.
5868  *
5869  * Input:
5870  *	ha:	adapter state pointer.
5871  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5872  *	mode:	flags.
5873  *
5874  * Returns:
5875  *	None, request status indicated in cmd->Status.
5876  *
5877  * Context:
5878  *	Kernel context.
5879  */
5880 static void
5881 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5882 {
5883 	EXT_HBA_PORT_STAT	ps = {0};
5884 	ql_link_stats_t		*ls;
5885 	int			rval;
5886 	uint16_t		qlnt;
5887 	EXT_DEST_ADDR		pextdestaddr;
5888 	uint8_t			*name;
5889 	ql_tgt_t		*tq = NULL;
5890 	int			retry = 10;
5891 
5892 	QL_PRINT_9(ha, "started\n");
5893 
5894 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5895 	    (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5896 		EL(ha, "failed, ddi_copyin\n");
5897 		cmd->Status = EXT_STATUS_COPY_ERR;
5898 		cmd->ResponseLen = 0;
5899 		return;
5900 	}
5901 
5902 	qlnt = QLNT_PORT;
5903 	name = pextdestaddr.DestAddr.WWPN;
5904 
5905 	QL_PRINT_9(ha, "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5906 	    name[0], name[1], name[2], name[3], name[4], name[5],
5907 	    name[6], name[7]);
5908 
5909 	tq = ql_find_port(ha, name, qlnt);
5910 
5911 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5912 		EL(ha, "failed, fc_port not found\n");
5913 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5914 		cmd->ResponseLen = 0;
5915 		return;
5916 	}
5917 
5918 	while (ha->task_daemon_flags &
5919 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5920 		ql_delay(ha, 10000000);	/* 10 second delay */
5921 
5922 		retry--;
5923 
5924 		if (retry == 0) { /* effectively 100 seconds */
5925 			EL(ha, "failed, LOOP_NOT_READY\n");
5926 			cmd->Status = EXT_STATUS_BUSY;
5927 			cmd->ResponseLen = 0;
5928 			return;
5929 		}
5930 	}
5931 
5932 	/* Allocate memory for command. */
5933 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5934 	if (ls == NULL) {
5935 		EL(ha, "failed, kmem_zalloc\n");
5936 		cmd->Status = EXT_STATUS_NO_MEMORY;
5937 		cmd->ResponseLen = 0;
5938 		return;
5939 	}
5940 
5941 	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
5942 	    (caddr_t)ls, 0);
5943 	if (rval != QL_SUCCESS) {
5944 		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
5945 		    tq->d_id.b24);
5946 		cmd->Status = EXT_STATUS_MAILBOX;
5947 		cmd->DetailStatus = rval;
5948 		cmd->ResponseLen = 0;
5949 	} else {
5950 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5951 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5952 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5953 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5954 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5955 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5956 
5957 		rval = ddi_copyout((void *)&ps,
5958 		    (void *)(uintptr_t)cmd->ResponseAdr,
5959 		    sizeof (EXT_HBA_PORT_STAT), mode);
5960 
5961 		if (rval != 0) {
5962 			EL(ha, "failed, ddi_copyout\n");
5963 			cmd->Status = EXT_STATUS_COPY_ERR;
5964 			cmd->ResponseLen = 0;
5965 		} else {
5966 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5967 		}
5968 	}
5969 
5970 	kmem_free(ls, sizeof (ql_link_stats_t));
5971 
5972 	QL_PRINT_9(ha, "done\n");
5973 }
5974 
5975 /*
5976  * ql_get_statistics_fc4
5977  *	Performs EXT_SC_GET_FC_STATISTICS subcommand of EXT_CC_GET_DATA.
5978  *
5979  * Input:
5980  *	ha:	adapter state pointer.
5981  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5982  *	mode:	flags.
5983  *
5984  * Returns:
5985  *	None, request status indicated in cmd->Status.
5986  *
5987  * Context:
5988  *	Kernel context.
5989  */
5990 static void
5991 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5992 {
5993 	uint32_t		rval;
5994 	EXT_HBA_FC4STATISTICS	fc4stats = {0};
5995 	ql_xioctl_t		*xp = ha->xioctl;
5996 
5997 	QL_PRINT_9(ha, "started\n");
5998 
5999 	fc4stats.InputRequests = xp->IOInputRequests;
6000 	fc4stats.OutputRequests = xp->IOOutputRequests;
6001 	fc4stats.ControlRequests = xp->IOControlRequests;
6002 	fc4stats.InputMegabytes = xp->IOInputMByteCnt;
6003 	fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
6004 
6005 	rval = ddi_copyout((void *)&fc4stats,
6006 	    (void *)(uintptr_t)cmd->ResponseAdr,
6007 	    sizeof (EXT_HBA_FC4STATISTICS), mode);
6008 
6009 	if (rval != 0) {
6010 		EL(ha, "failed, ddi_copyout\n");
6011 		cmd->Status = EXT_STATUS_COPY_ERR;
6012 		cmd->ResponseLen = 0;
6013 	} else {
6014 		cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
6015 	}
6016 
6017 	QL_PRINT_9(ha, "done\n");
6018 }
6019 
6020 /*
6021  * ql_set_led_state
6022  *	Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
6023  *
6024  * Input:
6025  *	ha:	adapter state pointer.
6026  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6027  *	mode:	flags.
6028  *
6029  * Returns:
6030  *	None, request status indicated in cmd->Status.
6031  *
6032  * Context:
6033  *	Kernel context.
6034  */
6035 static void
6036 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6037 {
6038 	EXT_BEACON_CONTROL	bstate;
6039 	int			rval;
6040 	ql_mbx_data_t		mr;
6041 
6042 	QL_PRINT_9(ha, "started\n");
6043 
6044 	if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
6045 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6046 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
6047 		EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL,"
6048 		    " Len=%xh\n", cmd->RequestLen);
6049 		cmd->ResponseLen = 0;
6050 		return;
6051 	}
6052 
6053 	if (!CFG_IST(ha, CFG_SET_LEDS_SUPPORT)) {
6054 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
6055 		cmd->DetailStatus = 0;
6056 		EL(ha, "done - failed, Invalid function for HBA model\n");
6057 		cmd->ResponseLen = 0;
6058 		return;
6059 	}
6060 
6061 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
6062 	    cmd->RequestLen, mode);
6063 
6064 	if (rval != 0) {
6065 		cmd->Status = EXT_STATUS_COPY_ERR;
6066 		EL(ha, "done -  failed, ddi_copyin\n");
6067 		return;
6068 	}
6069 
6070 	switch (bstate.State) {
6071 	case EXT_DEF_GRN_BLINK_OFF:	/* turn beacon off */
6072 		if (ha->ledstate.BeaconState == BEACON_OFF) {
6073 			/* not quite an error -- LED state is already off */
6074 			cmd->Status = EXT_STATUS_OK;
6075 			EL(ha, "LED off request -- LED is already off\n");
6076 			break;
6077 		}
6078 
6079 		if (CFG_IST(ha, CFG_CTRL_82XX)) {
6080 			rval = ql_diag_beacon(ha, QL_BEACON_DISABLE,
6081 			    &mr);
6082 
6083 			if (rval == QL_SUCCESS) {
6084 				ha->ledstate.BeaconState = BEACON_OFF;
6085 				ha->ledstate.LEDflags = LED_ALL_OFF;
6086 				cmd->Status = EXT_STATUS_OK;
6087 			} else {
6088 				cmd->Status = EXT_STATUS_ERR;
6089 				EL(ha, "failed, disable beacon request %xh\n",
6090 				    bstate.State);
6091 			}
6092 			break;
6093 		}
6094 
6095 		ha->ledstate.BeaconState = BEACON_OFF;
6096 		ha->ledstate.LEDflags = LED_ALL_OFF;
6097 
6098 		if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
6099 			cmd->Status = EXT_STATUS_MAILBOX;
6100 		} else {
6101 			cmd->Status = EXT_STATUS_OK;
6102 		}
6103 		break;
6104 
6105 	case EXT_DEF_GRN_BLINK_ON:	/* turn beacon on */
6106 		if (ha->ledstate.BeaconState == BEACON_ON) {
6107 			/* not quite an error -- LED state is already on */
6108 			cmd->Status = EXT_STATUS_OK;
6109 			EL(ha, "LED on request  - LED is already on\n");
6110 			break;
6111 		}
6112 
6113 		if (CFG_IST(ha, CFG_CTRL_82XX)) {
6114 			rval = ql_diag_beacon(ha, QL_BEACON_ENABLE,
6115 			    &mr);
6116 
6117 			if (rval == QL_SUCCESS) {
6118 				ha->ledstate.BeaconState = BEACON_ON;
6119 				ha->ledstate.LEDflags = LED_GREEN;
6120 				cmd->Status = EXT_STATUS_OK;
6121 			} else {
6122 				cmd->Status = EXT_STATUS_ERR;
6123 				EL(ha, "failed, enable beacon request %xh\n",
6124 				    bstate.State);
6125 			}
6126 			break;
6127 		}
6128 
6129 		if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
6130 			cmd->Status = EXT_STATUS_MAILBOX;
6131 			break;
6132 		}
6133 
6134 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
6135 			ha->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
6136 		} else {
6137 			ha->ledstate.LEDflags = LED_GREEN;
6138 		}
6139 		ha->ledstate.BeaconState = BEACON_ON;
6140 
6141 		cmd->Status = EXT_STATUS_OK;
6142 		break;
6143 	default:
6144 		cmd->Status = EXT_STATUS_ERR;
6145 		EL(ha, "failed, unknown state request %xh\n", bstate.State);
6146 		break;
6147 	}
6148 
6149 	QL_PRINT_9(ha, "done\n");
6150 }
6151 
6152 /*
6153  * ql_get_led_state
6154  *	Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
6155  *
6156  * Input:
6157  *	ha:	adapter state pointer.
6158  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6159  *	mode:	flags.
6160  *
6161  * Returns:
6162  *	None, request status indicated in cmd->Status.
6163  *
6164  * Context:
6165  *	Kernel context.
6166  */
6167 static void
6168 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6169 {
6170 	EXT_BEACON_CONTROL	bstate = {0};
6171 	uint32_t		rval;
6172 
6173 	QL_PRINT_9(ha, "started\n");
6174 
6175 	if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
6176 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6177 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
6178 		EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL,"
6179 		    " Len=%xh\n", cmd->ResponseLen);
6180 		cmd->ResponseLen = 0;
6181 		return;
6182 	}
6183 
6184 	if (!CFG_IST(ha, CFG_SET_LEDS_SUPPORT)) {
6185 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
6186 		cmd->DetailStatus = 0;
6187 		EL(ha, "done - failed, Invalid function for HBA model\n");
6188 		cmd->ResponseLen = 0;
6189 		return;
6190 	}
6191 
6192 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
6193 		cmd->Status = EXT_STATUS_BUSY;
6194 		EL(ha, "done -  failed, isp abort active\n");
6195 		cmd->ResponseLen = 0;
6196 		return;
6197 	}
6198 
6199 	/* inform the user of the current beacon state (off or on) */
6200 	bstate.State = ha->ledstate.BeaconState;
6201 
6202 	rval = ddi_copyout((void *)&bstate,
6203 	    (void *)(uintptr_t)cmd->ResponseAdr,
6204 	    sizeof (EXT_BEACON_CONTROL), mode);
6205 
6206 	if (rval != 0) {
6207 		EL(ha, "failed, ddi_copyout\n");
6208 		cmd->Status = EXT_STATUS_COPY_ERR;
6209 		cmd->ResponseLen = 0;
6210 	} else {
6211 		cmd->Status = EXT_STATUS_OK;
6212 		cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
6213 	}
6214 
6215 	QL_PRINT_9(ha, "done\n");
6216 }
6217 
6218 /*
6219  * ql_blink_led
6220  *	Determine the next state of the LED and drive it
6221  *
6222  * Input:
6223  *	ha:	adapter state pointer.
6224  *
6225  * Context:
6226  *	Interrupt context.
6227  */
6228 void
6229 ql_blink_led(ql_adapter_state_t *ha)
6230 {
6231 	uint32_t	nextstate;
6232 	ql_mbx_data_t	mr;
6233 
6234 	QL_PRINT_9(ha, "started\n");
6235 
6236 	if (ha->ledstate.BeaconState == BEACON_ON) {
6237 		if (CFG_IST(ha, CFG_CTRL_2363 | CFG_CTRL_2425)) {
6238 			/* determine the next led state */
6239 			if (CFG_IST(ha, CFG_CTRL_2425)) {
6240 				nextstate = (ha->ledstate.LEDflags) &
6241 				    (~(RD32_IO_REG(ha, gpiod)));
6242 			} else {
6243 				nextstate = (ha->ledstate.LEDflags) &
6244 				    (~(RD16_IO_REG(ha, gpiod)));
6245 			}
6246 
6247 			/* turn the led on or off */
6248 			ql_drive_led(ha, nextstate);
6249 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
6250 			if (ha->ledstate.flags & LED_ACTIVE) {
6251 				mr.mb[1] = 0x2000;
6252 				mr.mb[2] = 0x4000;
6253 				ha->ledstate.flags &= ~LED_ACTIVE;
6254 			} else {
6255 				mr.mb[1] = 0x4000;
6256 				mr.mb[2] = 0x2000;
6257 				ha->ledstate.flags |= LED_ACTIVE;
6258 			}
6259 			(void) ql_set_led_config(ha, &mr);
6260 		} else if (CFG_IST(ha, CFG_CTRL_80XX)) {
6261 			if (ha->ledstate.flags & LED_ACTIVE) {
6262 				mr.mb[1] = 0x4000;
6263 				mr.mb[2] = 0x2000;
6264 				mr.mb[3] = 0x4000;
6265 				mr.mb[4] = 0x4000;
6266 				mr.mb[5] = 0;
6267 				mr.mb[6] = 0x2000;
6268 				(void) ql_set_led_config(ha, &mr);
6269 				ha->ledstate.flags &= ~LED_ACTIVE;
6270 			} else {
6271 				mr.mb[1] = 0x4000;
6272 				mr.mb[2] = 0x4000;
6273 				mr.mb[3] = 0x4000;
6274 				mr.mb[4] = 0x2000;
6275 				mr.mb[5] = 0;
6276 				mr.mb[6] = 0x2000;
6277 				(void) ql_set_led_config(ha, &mr);
6278 				ha->ledstate.flags |= LED_ACTIVE;
6279 			}
6280 		} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
6281 			if (ha->ledstate.flags & LED_ACTIVE) {
6282 				(void) ql_write_remote_reg(ha,
6283 				    ha->ledstate.select,
6284 				    0x40004000);
6285 				(void) ql_write_remote_reg(ha,
6286 				    ha->ledstate.select + 4,
6287 				    0x40004000);
6288 				ha->ledstate.flags &= ~LED_ACTIVE;
6289 			} else {
6290 				(void) ql_write_remote_reg(ha,
6291 				    ha->ledstate.select,
6292 				    0x40002000);
6293 				(void) ql_write_remote_reg(ha,
6294 				    ha->ledstate.select + 4,
6295 				    0x40002000);
6296 				ha->ledstate.flags |= LED_ACTIVE;
6297 			}
6298 		} else if (!CFG_IST(ha, CFG_CTRL_27XX)) {
6299 			EL(ha, "unsupported HBA: %xh\n", ha->device_id);
6300 		}
6301 	}
6302 
6303 	QL_PRINT_9(ha, "done\n");
6304 }
6305 
6306 /*
6307  * ql_drive_led
6308  *	Drive the LEDs as determined by LEDflags
6309  *
6310  * Input:
6311  *	ha:		adapter state pointer.
6312  *	LEDflags:	LED flags
6313  *
6314  * Context:
6315  *	Kernel/Interrupt context.
6316  */
6317 static void
6318 ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
6319 {
6320 	QL_PRINT_9(ha, "started\n");
6321 
6322 	if (CFG_IST(ha, CFG_CTRL_2363)) {
6323 
6324 		uint16_t	gpio_enable, gpio_data;
6325 
6326 		/* setup to send new data */
6327 		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
6328 		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
6329 		WRT16_IO_REG(ha, gpioe, gpio_enable);
6330 
6331 		/* read current data and clear out old led data */
6332 		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
6333 		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);
6334 
6335 		/* set in the new led data. */
6336 		gpio_data = (uint16_t)(gpio_data | LEDflags);
6337 
6338 		/* write out the new led data */
6339 		WRT16_IO_REG(ha, gpiod, gpio_data);
6340 
6341 	} else if (CFG_IST(ha, CFG_CTRL_2425)) {
6342 		uint32_t	gpio_data;
6343 
6344 		/* setup to send new data */
6345 		gpio_data = RD32_IO_REG(ha, gpiod);
6346 		gpio_data |= LED_MASK_UPDATE_24;
6347 		WRT32_IO_REG(ha, gpiod, gpio_data);
6348 
6349 		/* read current data and clear out old led data */
6350 		gpio_data = RD32_IO_REG(ha, gpiod);
6351 		gpio_data &= ~LED_MASK_COLORS_24;
6352 
6353 		/* set in the new led data */
6354 		gpio_data |= LEDflags;
6355 
6356 		/* write out the new led data */
6357 		WRT32_IO_REG(ha, gpiod, gpio_data);
6358 
6359 	} else {
6360 		EL(ha, "unsupported HBA: %xh\n", ha->device_id);
6361 	}
6362 
6363 	QL_PRINT_9(ha, "done\n");
6364 }
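
/*
 * Both branches above use the same GPIO read-modify-write idiom: enable
 * the LED pins, clear the stale color bits, then merge in the new ones.
 * A minimal 16-bit sketch (hypothetical helper, 23xx-style registers)
 * is shown here for illustration.
 */
static void
ql_example_gpio_led_rmw(ql_adapter_state_t *ha, uint16_t led_bits)
{
	uint16_t	gpio_data;

	/* Read the current pin state and drop the old LED bits. */
	gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
	gpio_data = (uint16_t)(gpio_data & ~LED_MASK);

	/* Merge in the requested LED bits and write the result back. */
	gpio_data = (uint16_t)(gpio_data | led_bits);
	WRT16_IO_REG(ha, gpiod, gpio_data);
}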
6365 
6366 /*
6367  * ql_setup_led
6368  *	Setup LED for driver control
6369  *
6370  * Input:
6371  *	ha:	adapter state pointer.
6372  *
6373  * Context:
6374  *	Kernel/Interrupt context.
6375  */
6376 static int
6377 ql_setup_led(ql_adapter_state_t *ha)
6378 {
6379 	int		rval = QL_SUCCESS;
6380 	ql_mbx_data_t	mr;
6381 
6382 	QL_PRINT_9(ha, "started\n");
6383 
6384 	if (CFG_IST(ha, CFG_CTRL_2363 | CFG_CTRL_2425)) {
6385 		/* decouple the LED control from the fw */
6386 		rval = ql_get_firmware_option(ha, &mr);
6387 		if (rval != QL_SUCCESS) {
6388 			EL(ha, "failed, get_firmware_option=%xh\n", rval);
6389 			return (rval);
6390 		}
6391 
6392 		/* set the appropriate options */
6393 		mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
6394 
6395 		/* send it back to the firmware */
6396 		rval = ql_set_firmware_option(ha, &mr);
6397 		if (rval != QL_SUCCESS) {
6398 			EL(ha, "failed, set_firmware_option=%xh\n", rval);
6399 			return (rval);
6400 		}
6401 
6402 		/* initially, turn the LEDs off */
6403 		ql_drive_led(ha, LED_ALL_OFF);
6404 
6405 	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
6406 		(void) ql_get_led_config(ha, &ha->ledstate.cfg);
6407 		mr.mb[1] = 0x2000;
6408 		mr.mb[2] = 0x2000;
6409 		rval = ql_set_led_config(ha, &mr);
6410 
6411 	} else if (CFG_IST(ha, CFG_CTRL_80XX)) {
6412 		/* Save initial value */
6413 		rval = ql_get_led_config(ha, &ha->ledstate.cfg);
6414 		if (rval != QL_SUCCESS) {
6415 			EL(ha, "failed, get_led_config=%xh\n", rval);
6416 			return (rval);
6417 		}
6418 		mr.mb[1] = 0x4000;
6419 		mr.mb[2] = 0x4000;
6420 		mr.mb[3] = 0x4000;
6421 		mr.mb[4] = 0x2000;
6422 		mr.mb[5] = 0;
6423 		mr.mb[6] = 0x2000;
6424 		rval = ql_set_led_config(ha, &mr);
6425 
6426 	} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
6427 		rval = ql_get_firmware_option(ha, &mr);
6428 		if (rval != QL_SUCCESS) {
6429 			EL(ha, "failed, get_firmware_option=%xh\n", rval);
6430 			return (rval);
6431 		}
6432 
6433 		mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_LEDS);
6434 
6435 		rval = ql_set_firmware_option(ha, &mr);
6436 		if (rval != QL_SUCCESS) {
6437 			EL(ha, "failed, set_firmware_option=%xh\n", rval);
6438 			return (rval);
6439 		}
6440 
6441 		(void) ql_write_remote_reg(ha, ha->ledstate.select,
6442 		    0x40002000);
6443 		(void) ql_write_remote_reg(ha, ha->ledstate.select + 4,
6444 		    0x40002000);
6445 
6446 	} else if (CFG_IST(ha, CFG_CTRL_27XX)) {
6447 		/* take control of LED */
6448 		rval = ql_get_firmware_option(ha, &mr);
6449 		if (rval != QL_SUCCESS) {
6450 			EL(ha, "failed, get_firmware_option=%xh\n", rval);
6451 			return (rval);
6452 		}
6453 
6454 		mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_LEDS);
6455 
6456 		rval = ql_set_firmware_option(ha, &mr);
6457 		if (rval != QL_SUCCESS) {
6458 			EL(ha, "failed, set_firmware_option=%xh\n", rval);
6459 			return (rval);
6460 		}
6461 
6462 		mr.mb[1] = 0xf;
6463 		mr.mb[2] = 0x230;
6464 		mr.mb[3] = 0x230;
6465 		mr.mb[4] = 0x4000;
6466 		rval = ql_led_config(ha, &mr);
6467 		if (rval != QL_SUCCESS) {
6468 			EL(ha, "failed, led_config=%xh\n", rval);
6469 			return (rval);
6470 		}
6471 	} else {
6472 		EL(ha, "unsupported HBA: %xh\n", ha->device_id);
6473 	}
6474 	ha->ledstate.flags |= LED_ACTIVE;
6475 
6476 	QL_PRINT_9(ha, "done\n");
6477 
6478 	return (rval);
6479 }
6480 
6481 /*
6482  * ql_wrapup_led
6483  *	Return LED control to the firmware
6484  *
6485  * Input:
6486  *	ha:	adapter state pointer.
6487  *
6488  * Context:
6489  *	Kernel/Interrupt context.
6490  */
6491 static int
6492 ql_wrapup_led(ql_adapter_state_t *ha)
6493 {
6494 	int		rval = QL_SUCCESS;
6495 	ql_mbx_data_t	mr;
6496 
6497 	QL_PRINT_9(ha, "started\n");
6498 
6499 
6500 	if (CFG_IST(ha, CFG_CTRL_2363 | CFG_CTRL_2425)) {
6501 		uint32_t	gpio_data;
6502 
6503 		/* Turn all LEDs off */
6504 		ql_drive_led(ha, LED_ALL_OFF);
6505 
6506 		if (CFG_IST(ha, CFG_CTRL_2425)) {
6507 			/* disable the LED update mask */
6508 			gpio_data = RD32_IO_REG(ha, gpiod);
6509 			gpio_data &= ~LED_MASK_UPDATE_24;
6510 
6511 			/* write out the data */
6512 			WRT32_IO_REG(ha, gpiod, gpio_data);
6513 			/* give LED control back to the f/w */
6514 		}
6515 		rval = ql_get_firmware_option(ha, &mr);
6516 		if (rval != QL_SUCCESS) {
6517 			EL(ha, "failed, get_firmware_option=%xh\n", rval);
6518 			return (rval);
6519 		}
6520 
6521 		mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6522 
6523 		rval = ql_set_firmware_option(ha, &mr);
6524 		if (rval != QL_SUCCESS) {
6525 			EL(ha, "failed, set_firmware_option=%xh\n", rval);
6526 			return (rval);
6527 		}
6528 	} else if (CFG_IST(ha, CFG_CTRL_8081)) {
6529 		rval = ql_set_led_config(ha, &ha->ledstate.cfg);
6530 
6531 	} else if (CFG_IST(ha, CFG_CTRL_2783)) {
6532 		/* give LED control back to the f/w */
6533 		rval = ql_get_firmware_option(ha, &mr);
6534 		if (rval != QL_SUCCESS) {
6535 			EL(ha, "failed, get_firmware_option=%xh\n", rval);
6536 			return (rval);
6537 		}
6538 
6539 		mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_LEDS);
6540 
6541 		rval = ql_set_firmware_option(ha, &mr);
6542 		if (rval != QL_SUCCESS) {
6543 			EL(ha, "failed, set_firmware_option=%xh\n", rval);
6544 			return (rval);
6545 		}
6546 
6547 	} else {
6548 		EL(ha, "unsupported HBA: %xh\n", ha->device_id);
6549 	}
6550 
6551 	QL_PRINT_9(ha, "done\n");
6552 
6553 	return (rval);
6554 }
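
/*
 * ql_setup_led() and ql_wrapup_led() pass LED ownership back and forth
 * with a firmware-option bit: setting FO1_DISABLE_GPIO gives the GPIO
 * pins to the driver, clearing it returns them to the firmware.  The
 * get/modify/set exchange is sketched below; ql_example_fw_option_bit()
 * is a hypothetical helper, not part of the driver.
 */
static int
ql_example_fw_option_bit(ql_adapter_state_t *ha, uint16_t bit, int set)
{
	ql_mbx_data_t	mr;
	int		rval;

	/* Read the current firmware options. */
	rval = ql_get_firmware_option(ha, &mr);
	if (rval != QL_SUCCESS) {
		return (rval);
	}

	/* Set or clear the requested option bit. */
	if (set) {
		mr.mb[1] = (uint16_t)(mr.mb[1] | bit);
	} else {
		mr.mb[1] = (uint16_t)(mr.mb[1] & ~bit);
	}

	/* Write the modified options back to the firmware. */
	return (ql_set_firmware_option(ha, &mr));
}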
6555 
6556 /*
6557  * ql_get_port_summary
6558  *	Performs EXT_SC_GET_PORT_SUMMARY subcommand of EXT_CC_GET_DATA.
6559  *
6560  *	The EXT_IOCTL->RequestAdr points to a single
6561  *	UINT32 which identifies the device type.
6562  *
6563  * Input:
6564  *	ha:	adapter state pointer.
6565  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6566  *	mode:	flags.
6567  *
6568  * Returns:
6569  *	None, request status indicated in cmd->Status.
6570  *
6571  * Context:
6572  *	Kernel context.
6573  */
6574 static void
6575 ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6576 {
6577 	EXT_DEVICEDATA		dd = {0};
6578 	EXT_DEVICEDATA		*uddp;
6579 	ql_link_t		*link;
6580 	ql_tgt_t		*tq;
6581 	uint32_t		rlen, dev_type, index;
6582 	int			rval = 0;
6583 	EXT_DEVICEDATAENTRY	*uddep, *ddep;
6584 
6585 	QL_PRINT_9(ha, "started\n");
6586 
6587 	ddep = &dd.EntryList[0];
6588 
6589 	/*
6590 	 * Get the type of device the requestor is looking for.
6591 	 *
6592 	 * We ignore this for now.
6593 	 */
6594 	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6595 	    (void *)&dev_type, sizeof (dev_type), mode);
6596 	if (rval != 0) {
6597 		cmd->Status = EXT_STATUS_COPY_ERR;
6598 		cmd->ResponseLen = 0;
6599 		EL(ha, "failed, ddi_copyin\n");
6600 		return;
6601 	}
6602 	/*
6603 	 * Count the number of entries to be returned. Count devices
6604 	 * that are offline, but have been persistently bound.
6605 	 */
6606 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6607 		for (link = ha->dev[index].first; link != NULL;
6608 		    link = link->next) {
6609 			tq = link->base_address;
6610 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6611 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6612 				continue;	/* Skip this one */
6613 			}
6614 			dd.TotalDevices++;
6615 		}
6616 	}
6617 	/*
6618 	 * Compute the number of entries that can be returned
6619 	 * based upon the size of caller's response buffer.
6620 	 */
6621 	dd.ReturnListEntryCount = 0;
6622 	if (dd.TotalDevices == 0) {
6623 		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
6624 	} else {
6625 		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
6626 		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
6627 	}
6628 	if (rlen > cmd->ResponseLen) {
6629 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6630 		cmd->DetailStatus = rlen;
6631 		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
6632 		    rlen, cmd->ResponseLen);
6633 		cmd->ResponseLen = 0;
6634 		return;
6635 	}
6636 	cmd->ResponseLen = 0;
6637 	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
6638 	uddep = &uddp->EntryList[0];
6639 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6640 		for (link = ha->dev[index].first; link != NULL;
6641 		    link = link->next) {
6642 			tq = link->base_address;
6643 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6644 			    !VALID_TARGET_ID(ha, tq->loop_id) ||
6645 			    tq->d_id.b24 == FS_MANAGEMENT_SERVER) {
6646 				continue;	/* Skip this one */
6647 			}
6648 
6649 			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));
6650 
6651 			bcopy(tq->node_name, ddep->NodeWWN, 8);
6652 			bcopy(tq->port_name, ddep->PortWWN, 8);
6653 
6654 			ddep->PortID[0] = tq->d_id.b.domain;
6655 			ddep->PortID[1] = tq->d_id.b.area;
6656 			ddep->PortID[2] = tq->d_id.b.al_pa;
6657 
6658 			bcopy(tq->port_name,
6659 			    (caddr_t)&ddep->TargetAddress.Target, 8);
6660 
6661 			ddep->DeviceFlags = tq->flags;
6662 			ddep->LoopID = tq->loop_id;
6663 			QL_PRINT_9(ha, "Tgt=%lld, loop=%xh, "
6664 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
6665 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6666 			    ddep->TargetAddress.Target, ddep->LoopID,
6667 			    ddep->NodeWWN[0], ddep->NodeWWN[1],
6668 			    ddep->NodeWWN[2], ddep->NodeWWN[3],
6669 			    ddep->NodeWWN[4], ddep->NodeWWN[5],
6670 			    ddep->NodeWWN[6], ddep->NodeWWN[7],
6671 			    ddep->PortWWN[0], ddep->PortWWN[1],
6672 			    ddep->PortWWN[2], ddep->PortWWN[3],
6673 			    ddep->PortWWN[4], ddep->PortWWN[5],
6674 			    ddep->PortWWN[6], ddep->PortWWN[7]);
6675 			rval = ddi_copyout((void *)ddep, (void *)uddep,
6676 			    sizeof (EXT_DEVICEDATAENTRY), mode);
6677 
6678 			if (rval != 0) {
6679 				cmd->Status = EXT_STATUS_COPY_ERR;
6680 				cmd->ResponseLen = 0;
6681 				EL(ha, "failed, ddi_copyout\n");
6682 				break;
6683 			}
6684 			dd.ReturnListEntryCount++;
6685 			uddep++;
6686 			cmd->ResponseLen += (uint32_t)
6687 			    sizeof (EXT_DEVICEDATAENTRY);
6688 		}
6689 	}
6690 	rval = ddi_copyout((void *)&dd, (void *)uddp,
6691 	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);
6692 
6693 	if (rval != 0) {
6694 		cmd->Status = EXT_STATUS_COPY_ERR;
6695 		cmd->ResponseLen = 0;
6696 		EL(ha, "failed, ddi_copyout-2\n");
6697 	} else {
6698 		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
6699 		QL_PRINT_9(ha, "done\n");
6700 	}
6701 }
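
/*
 * EXT_DEVICEDATA is laid out as a fixed header followed by a
 * one-element EntryList[] array, so the response for N devices needs
 * sizeof (EXT_DEVICEDATA) + (N - 1) * sizeof (EXT_DEVICEDATAENTRY)
 * bytes.  The sizing rule used above is restated in the small sketch
 * below (hypothetical helper).
 */
static uint32_t
ql_example_devicedata_len(uint32_t ndevices)
{
	if (ndevices == 0) {
		/* Header only, no entries. */
		return ((uint32_t)(sizeof (EXT_DEVICEDATA) -
		    sizeof (EXT_DEVICEDATAENTRY)));
	}
	return ((uint32_t)(sizeof (EXT_DEVICEDATA) +
	    (sizeof (EXT_DEVICEDATAENTRY) * (ndevices - 1))));
}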
6702 
6703 /*
6704  * ql_get_target_id
6705  *	Performs EXT_SC_GET_TARGET_ID subcommand of EXT_CC_GET_DATA.
6706  *
6707  * Input:
6708  *	ha:	adapter state pointer.
6709  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6710  *	mode:	flags.
6711  *
6712  * Returns:
6713  *	None, request status indicated in cmd->Status.
6714  *
6715  * Context:
6716  *	Kernel context.
6717  */
6718 static void
6719 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6720 {
6721 	uint32_t		rval;
6722 	uint16_t		qlnt;
6723 	EXT_DEST_ADDR		extdestaddr = {0};
6724 	uint8_t			*name;
6725 	uint8_t			wwpn[EXT_DEF_WWN_NAME_SIZE];
6726 	ql_tgt_t		*tq;
6727 
6728 	QL_PRINT_9(ha, "started\n");
6729 
6730 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6731 	    (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6732 		EL(ha, "failed, ddi_copyin\n");
6733 		cmd->Status = EXT_STATUS_COPY_ERR;
6734 		cmd->ResponseLen = 0;
6735 		return;
6736 	}
6737 
6738 	qlnt = QLNT_PORT;
6739 	name = wwpn;
6740 	QL_PRINT_9(ha, "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6741 	    name[0], name[1], name[2], name[3], name[4], name[5],
6742 	    name[6], name[7]);
6743 
6744 	tq = ql_find_port(ha, name, qlnt);
6745 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6746 		EL(ha, "failed, fc_port not found\n");
6747 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6748 		cmd->ResponseLen = 0;
6749 		return;
6750 	}
6751 
6752 	bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6753 
6754 	rval = ddi_copyout((void *)&extdestaddr,
6755 	    (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6756 	if (rval != 0) {
6757 		EL(ha, "failed, ddi_copyout\n");
6758 		cmd->Status = EXT_STATUS_COPY_ERR;
6759 		cmd->ResponseLen = 0;
6760 	}
6761 
6762 	QL_PRINT_9(ha, "done\n");
6763 }
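
/*
 * The ioctl paths that take a WWPN all resolve it the same way: call
 * ql_find_port() with a QLNT_PORT key and then verify the loop ID.  A
 * minimal lookup sketch (hypothetical helper) is shown below.
 */
static ql_tgt_t *
ql_example_lookup_by_wwpn(ql_adapter_state_t *ha, uint8_t *wwpn)
{
	ql_tgt_t	*tq;

	tq = ql_find_port(ha, wwpn, QLNT_PORT);
	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
		/* Unknown port or invalid loop ID. */
		return (NULL);
	}
	return (tq);
}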
6764 
6765 /*
6766  * ql_setup_fcache
6767  *	Populates selected flash sections into the cache
6768  *
6769  * Input:
6770  *	ha = adapter state pointer.
6771  *
6772  * Returns:
6773  *	ql local function return status code.
6774  *
6775  * Context:
6776  *	Kernel context.
6777  *
6778  * Note:
6779  *	Driver must be in a stalled state prior to entering, or stall
6780  *	handling must be added here before ql_setup_flash() is called.
6781  */
6782 int
6783 ql_setup_fcache(ql_adapter_state_t *ha)
6784 {
6785 	int		rval;
6786 	uint32_t	freadpos = 0;
6787 	uint32_t	fw_done = 0;
6788 	ql_fcache_t	*head = NULL;
6789 	ql_fcache_t	*tail = NULL;
6790 	ql_fcache_t	*ftmp;
6791 
6792 	QL_PRINT_10(ha, "started cfg=0x%llx\n", ha->cfg_flags);
6793 
6794 	/* If we already have populated it, rtn */
6795 	if (ha->fcache != NULL) {
6796 		EL(ha, "buffer already populated\n");
6797 		return (QL_SUCCESS);
6798 	}
6799 
6800 	ql_flash_nvram_defaults(ha);
6801 
6802 	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
6803 		EL(ha, "unable to setup flash; rval=%xh\n", rval);
6804 		return (rval);
6805 	}
6806 
6807 	while (freadpos != 0xffffffff) {
6808 		/* Allocate & populate this node */
6809 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6810 			EL(ha, "node alloc failed\n");
6811 			rval = QL_FUNCTION_FAILED;
6812 			break;
6813 		}
6814 
6815 		/* link in the new node */
6816 		if (head == NULL) {
6817 			head = tail = ftmp;
6818 		} else {
6819 			tail->next = ftmp;
6820 			tail = ftmp;
6821 		}
6822 
6823 		/* Do the firmware node first for 24xx/25xx's */
6824 		if (fw_done == 0) {
6825 			if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
6826 				freadpos = ha->flash_fw_addr << 2;
6827 			}
6828 			fw_done = 1;
6829 		}
6830 
6831 		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
6832 		    freadpos)) != QL_SUCCESS) {
6833 			EL(ha, "failed, 24xx dump_fcode"
6834 			    " pos=%xh rval=%xh\n", freadpos, rval);
6835 			rval = QL_FUNCTION_FAILED;
6836 			break;
6837 		}
6838 
6839 		/* checkout the pci data / format */
6840 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6841 			EL(ha, "flash header incorrect\n");
6842 			rval = QL_FUNCTION_FAILED;
6843 			break;
6844 		}
6845 	}
6846 
6847 	if (rval != QL_SUCCESS) {
6848 		/* release all resources we have */
6849 		ftmp = head;
6850 		while (ftmp != NULL) {
6851 			tail = ftmp->next;
6852 			kmem_free(ftmp->buf, FBUFSIZE);
6853 			kmem_free(ftmp, sizeof (ql_fcache_t));
6854 			ftmp = tail;
6855 		}
6856 
6857 		EL(ha, "failed, done\n");
6858 	} else {
6859 		ha->fcache = head;
6860 		QL_PRINT_10(ha, "done\n");
6861 	}
6862 
6863 	return (rval);
6864 }
6865 
6866 /*
6867  * ql_update_fcache
6868  *	Re-populates updated flash into the fcache. If the
6869  *	fcache does not exist (e.g., flash was empty/invalid on
6870  *	boot), this routine will create and then populate it.
6871  *
6872  * Input:
6873  *	ha	= adapter state pointer.
6874  *	bfp	= Pointer to flash buffer.
6875  *	bsize	= Size of flash buffer.
6876  *
6877  * Returns:
6878  *
6879  * Context:
6880  *	Kernel context.
6881  */
6882 void
6883 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6884 {
6885 	int		rval = QL_SUCCESS;
6886 	uint32_t	freadpos = 0;
6887 	uint32_t	fw_done = 0;
6888 	ql_fcache_t	*head = NULL;
6889 	ql_fcache_t	*tail = NULL;
6890 	ql_fcache_t	*ftmp;
6891 
6892 	QL_PRINT_3(ha, "started\n");
6893 
6894 	while (freadpos != 0xffffffff) {
6895 
6896 		/* Allocate & populate this node */
6897 
6898 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6899 			EL(ha, "node alloc failed\n");
6900 			rval = QL_FUNCTION_FAILED;
6901 			break;
6902 		}
6903 
6904 		/* link in the new node */
6905 		if (head == NULL) {
6906 			head = tail = ftmp;
6907 		} else {
6908 			tail->next = ftmp;
6909 			tail = ftmp;
6910 		}
6911 
6912 		/* Do the firmware node first for 24xx's */
6913 		if (fw_done == 0) {
6914 			if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
6915 				freadpos = ha->flash_fw_addr << 2;
6916 			}
6917 			fw_done = 1;
6918 		}
6919 
6920 		/* read in first FBUFSIZE bytes of this flash section */
6921 		if (freadpos + FBUFSIZE > bsize) {
6922 			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6923 			    freadpos, bsize);
6924 			rval = QL_FUNCTION_FAILED;
6925 			break;
6926 		}
6927 		bcopy(bfp + freadpos, ftmp->buf, FBUFSIZE);
6928 
6929 		/* checkout the pci data / format */
6930 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6931 			EL(ha, "flash header incorrect\n");
6932 			rval = QL_FUNCTION_FAILED;
6933 			break;
6934 		}
6935 	}
6936 
6937 	if (rval != QL_SUCCESS) {
6938 		/*
6939 		 * release all resources we have
6940 		 */
6941 		ql_fcache_rel(head);
6942 		EL(ha, "failed, done\n");
6943 	} else {
6944 		/*
6945 		 * Release previous fcache resources and update with new
6946 		 */
6947 		ql_fcache_rel(ha->fcache);
6948 		ha->fcache = head;
6949 
6950 		QL_PRINT_3(ha, "done\n");
6951 	}
6952 }
6953 
6954 /*
6955  * ql_setup_fnode
6956  *	Allocates fcache node
6957  *
6958  * Input:
6959  *	ha = adapter state pointer.
6960  *
6961  * Returns:
6962  *	Pointer to the allocated fcache node (NULL = failed).
6963  *
6964  * Context:
6965  *	Kernel context.
6966  *
6967  * Note:
6968  *	Driver must be in a stalled state prior to entering, or stall
6969  *	handling must be added before ql_setup_flash() is called.
6970  */
6971 static ql_fcache_t *
6972 ql_setup_fnode(ql_adapter_state_t *ha)
6973 {
6974 	ql_fcache_t	*fnode = NULL;
6975 
6976 	if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
6977 	    KM_SLEEP))) == NULL) {
6978 		EL(ha, "fnode alloc failed\n");
6979 		fnode = NULL;
6980 	} else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
6981 	    KM_SLEEP))) == NULL) {
6982 		EL(ha, "buf alloc failed\n");
6983 		kmem_free(fnode, sizeof (ql_fcache_t));
6984 		fnode = NULL;
6985 	} else {
6986 		fnode->buflen = FBUFSIZE;
6987 	}
6988 
6989 	return (fnode);
6990 }
6991 
6992 /*
6993  * ql_fcache_rel
6994  *	Releases the fcache resources
6995  *
6996  * Input:
6997  *	ha	= adapter state pointer.
6998  *	head	= Pointer to fcache linked list
6999  *
7000  * Returns:
7001  *
7002  * Context:
7003  *	Kernel context.
7004  *
7005  */
7006 void
7007 ql_fcache_rel(ql_fcache_t *head)
7008 {
7009 	ql_fcache_t	*ftmp = head;
7010 	ql_fcache_t	*tail;
7011 
7012 	/* release all resources we have */
7013 	while (ftmp != NULL) {
7014 		tail = ftmp->next;
7015 		kmem_free(ftmp->buf, FBUFSIZE);
7016 		kmem_free(ftmp, sizeof (ql_fcache_t));
7017 		ftmp = tail;
7018 	}
7019 }
7020 
7021 /*
7022  * ql_update_flash_caches
7023  *	Updates driver flash caches
7024  *
7025  * Input:
7026  *	ha:	adapter state pointer.
7027  *
7028  * Context:
7029  *	Kernel context.
7030  */
7031 static void
7032 ql_update_flash_caches(ql_adapter_state_t *ha)
7033 {
7034 	uint32_t		len;
7035 	ql_link_t		*link;
7036 	ql_adapter_state_t	*ha2;
7037 
7038 	QL_PRINT_3(ha, "started\n");
7039 
7040 	/* Get base path length. */
7041 	for (len = (uint32_t)strlen(ha->devpath); len; len--) {
7042 		if (ha->devpath[len] == ',' ||
7043 		    ha->devpath[len] == '@') {
7044 			break;
7045 		}
7046 	}
7047 
7048 	/* Reset fcache on all adapter instances. */
7049 	for (link = ql_hba.first; link != NULL; link = link->next) {
7050 		ha2 = link->base_address;
7051 
7052 		if (strncmp(ha->devpath, ha2->devpath, len) != 0) {
7053 			continue;
7054 		}
7055 
7056 		ql_fcache_rel(ha2->fcache);
7057 		ha2->fcache = NULL;
7058 
7059 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
7060 			if (ha2->vcache != NULL) {
7061 				kmem_free(ha2->vcache, QL_24XX_VPD_SIZE);
7062 				ha2->vcache = NULL;
7063 			}
7064 		}
7065 
7066 		(void) ql_setup_fcache(ha2);
7067 	}
7068 
7069 	QL_PRINT_3(ha, "done\n");
7070 }
7071 
7072 /*
7073  * ql_get_fbuf
7074  *	Search the fcache list for the type specified
7075  *
7076  * Input:
7077  *	fptr	= Pointer to fcache linked list
7078  *	ftype	= Type of image to be returned.
7079  *
7080  * Returns:
7081  *	Pointer to ql_fcache_t.
7082  *	NULL means not found.
7083  *
7084  * Context:
7085  *	Kernel context.
7086  *
7087  *
7088  */
7089 ql_fcache_t *
7090 ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
7091 {
7092 	while (fptr != NULL) {
7093 		/* does this image meet criteria? */
7094 		if (ftype & fptr->type) {
7095 			break;
7096 		}
7097 		fptr = fptr->next;
7098 	}
7099 	return (fptr);
7100 }
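
/*
 * A minimal usage sketch for the fcache list built by ql_setup_fcache():
 * look up the cached firmware image and log its version string.  The
 * helper name is an illustrative assumption.
 */
static void
ql_example_log_fw_version(ql_adapter_state_t *ha)
{
	ql_fcache_t	*fptr;

	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {
		EL(ha, "cached fw version=%s, buflen=%xh\n", fptr->verstr,
		    fptr->buflen);
	}
}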
7101 
7102 /*
7103  * ql_check_pci
7104  *
7105  *	Checks the passed buffer for a valid pci signature and
7106  *	expected (and in range) pci length values.
7107  *
7108  *	For the firmware type, a pci header is added since the image in
7109  *	the flash does not have one (!!!).
7110  *
7111  *	On a successful pci check, nextpos is adjusted to the next pci
7112  *	header (0xffffffff after the last pci image).
7113  *
7114  * Returns:
7115  *	0 --> pci header valid.
7116  *	1 --> pci header invalid.
7117  *
7118  * Context:
7119  *	Kernel context.
7120  */
7121 static int
7122 ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
7123 {
7124 	pci_header_t	*pcih;
7125 	pci_data_t	*pcid;
7126 	uint32_t	doff;
7127 	uint8_t		*pciinfo;
7128 
7129 	QL_PRINT_3(ha, "started\n");
7130 
7131 	if (fcache != NULL) {
7132 		pciinfo = fcache->buf;
7133 	} else {
7134 		EL(ha, "failed, null fcache ptr passed\n");
7135 		return (1);
7136 	}
7137 
7138 	if (pciinfo == NULL) {
7139 		EL(ha, "failed, null pciinfo ptr passed\n");
7140 		return (1);
7141 	}
7142 
7143 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
7144 		caddr_t	bufp;
7145 		uint_t	len;
7146 
7147 		if (pciinfo[0] != SBUS_CODE_FCODE) {
7148 			EL(ha, "failed, unable to detect sbus fcode\n");
7149 			return (1);
7150 		}
7151 		fcache->type = FTYPE_FCODE;
7152 
7153 		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
7154 		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
7155 		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
7156 		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
7157 		    (int *)&len) == DDI_PROP_SUCCESS) {
7158 
7159 			(void) snprintf(fcache->verstr,
7160 			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
7161 			kmem_free(bufp, len);
7162 		}
7163 
7164 		*nextpos = 0xffffffff;
7165 
7166 		QL_PRINT_3(ha, "CFG_SBUS_CARD, done\n");
7167 
7168 		return (0);
7169 	}
7170 
7171 	if (*nextpos == ha->flash_fw_addr << 2) {
7172 
7173 		pci_header_t	fwh = {0};
7174 		pci_data_t	fwd = {0};
7175 		uint8_t		*buf, *bufp;
7176 
7177 		/*
7178 		 * Build a pci header for the firmware module
7179 		 */
7180 		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
7181 		    NULL) {
7182 			EL(ha, "failed, unable to allocate buffer\n");
7183 			return (1);
7184 		}
7185 
7186 		fwh.signature[0] = PCI_HEADER0;
7187 		fwh.signature[1] = PCI_HEADER1;
7188 		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
7189 		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));
7190 
7191 		fwd.signature[0] = 'P';
7192 		fwd.signature[1] = 'C';
7193 		fwd.signature[2] = 'I';
7194 		fwd.signature[3] = 'R';
7195 		fwd.codetype = PCI_CODE_FW;
7196 		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
7197 		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));
7198 
7199 		bufp = buf;
7200 		bcopy(&fwh, bufp, sizeof (pci_header_t));
7201 		bufp += sizeof (pci_header_t);
7202 		bcopy(&fwd, bufp, sizeof (pci_data_t));
7203 		bufp += sizeof (pci_data_t);
7204 
7205 		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
7206 		    sizeof (pci_data_t)));
7207 		bcopy(buf, fcache->buf, FBUFSIZE);
7208 
7209 		fcache->type = FTYPE_FW;
7210 
7211 		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
7212 		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
7213 		    fcache->buf[27]);
7214 
7215 		*nextpos = ha->boot_code_addr << 2;
7216 		kmem_free(buf, FBUFSIZE);
7217 
7218 		QL_PRINT_3(ha, "FTYPE_FW, done\n");
7219 
7220 		return (0);
7221 	}
7222 
7223 	/* get to the pci header image length */
7224 	pcih = (pci_header_t *)pciinfo;
7225 
7226 	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);
7227 
7228 	/* some header section sanity check */
7229 	if (pcih->signature[0] != PCI_HEADER0 ||
7230 	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
7231 		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
7232 		    pcih->signature[0], pcih->signature[1], doff);
7233 		return (1);
7234 	}
7235 
7236 	pcid = (pci_data_t *)(pciinfo + doff);
7237 
7238 	/* a slight data section sanity check */
7239 	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
7240 	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
7241 		EL(ha, "failed, data sig mismatch!\n");
7242 		return (1);
7243 	}
7244 
7245 	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
7246 		QL_PRINT_3(ha, "last image\n");
7247 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
7248 			ql_flash_layout_table(ha, *nextpos +
7249 			    (pcid->imagelength[0] | (pcid->imagelength[1] <<
7250 			    8)) * PCI_SECTOR_SIZE);
7251 			(void) ql_24xx_flash_desc(ha);
7252 		}
7253 		*nextpos = 0xffffffff;
7254 	} else {
7255 		/* adjust the next flash read start position */
7256 		*nextpos += (pcid->imagelength[0] |
7257 		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
7258 	}
7259 
7260 	switch (pcid->codetype) {
7261 	case PCI_CODE_X86PC:
7262 		fcache->type = FTYPE_BIOS;
7263 		break;
7264 	case PCI_CODE_FCODE:
7265 		fcache->type = FTYPE_FCODE;
7266 		break;
7267 	case PCI_CODE_EFI:
7268 		fcache->type = FTYPE_EFI;
7269 		break;
7270 	case PCI_CODE_HPPA:
7271 		fcache->type = FTYPE_HPPA;
7272 		break;
7273 	default:
7274 		fcache->type = FTYPE_UNKNOWN;
7275 		break;
7276 	}
7277 
7278 	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
7279 	    "%d.%02d", pcid->revisionlevel[1], pcid->revisionlevel[0]);
7280 
7281 	QL_PRINT_3(ha, "done\n");
7282 
7283 	return (0);
7284 }
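
/*
 * Option-ROM images are chained through the standard PCI data
 * structure: the header's 16-bit dataoffset locates the "PCIR" block,
 * and that block's imagelength (in PCI_SECTOR_SIZE units) gives the
 * distance to the next image.  A minimal parsing sketch, assuming a
 * buffer that holds a complete image header, follows; the helper is
 * hypothetical, not part of the driver.
 */
static int
ql_example_next_rom_image(uint8_t *buf, uint32_t *imglen)
{
	pci_header_t	*pcih = (pci_header_t *)buf;
	pci_data_t	*pcid;
	uint32_t	doff;

	if (pcih->signature[0] != PCI_HEADER0 ||
	    pcih->signature[1] != PCI_HEADER1) {
		return (1);
	}

	/* Locate the PCI data structure within the image. */
	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);
	pcid = (pci_data_t *)(buf + doff);

	/* The image length is stored in PCI_SECTOR_SIZE units. */
	*imglen = (pcid->imagelength[0] | (pcid->imagelength[1] << 8)) *
	    PCI_SECTOR_SIZE;

	/* PCI_IND_LAST_IMAGE marks the final image in the chain. */
	return (pcid->indicator == PCI_IND_LAST_IMAGE ? -1 : 0);
}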
7285 
7286 /*
7287  * ql_flash_layout_table
7288  *	Obtains flash addresses from table
7289  *
7290  * Input:
7291  *	ha:		adapter state pointer.
7292  *	flt_paddr:	flash layout pointer address.
7293  *
7294  * Context:
7295  *	Kernel context.
7296  */
7297 static void
7298 ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr)
7299 {
7300 	ql_flt_ptr_t	*fptr;
7301 	uint8_t		*bp;
7302 	int		rval;
7303 	uint32_t	len, faddr, cnt;
7304 	uint16_t	chksum, w16;
7305 
7306 	QL_PRINT_9(ha, "started\n");
7307 
7308 	/* Process flash layout table header */
7309 	len = sizeof (ql_flt_ptr_t);
7310 	if ((bp = kmem_zalloc(len, KM_SLEEP)) == NULL) {
7311 		EL(ha, "kmem_zalloc=null\n");
7312 		return;
7313 	}
7314 
7315 	/* Process pointer to flash layout table */
7316 	if ((rval = ql_dump_fcode(ha, bp, len, flt_paddr)) != QL_SUCCESS) {
7317 		EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr,
7318 		    rval);
7319 		kmem_free(bp, len);
7320 		return;
7321 	}
7322 	fptr = (ql_flt_ptr_t *)bp;
7323 
7324 	/* Verify pointer to flash layout table. */
7325 	for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
7326 		w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
7327 		chksum += w16;
7328 	}
7329 	if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' ||
7330 	    fptr->sig[2] != 'L' || fptr->sig[3] != 'T') {
7331 		EL(ha, "ptr chksum=%xh, sig=%c%c%c%c \n",
7332 		    chksum, fptr->sig[0],
7333 		    fptr->sig[1], fptr->sig[2], fptr->sig[3]);
7334 		kmem_free(bp, len);
7335 		return;
7336 	}
7337 	faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2],
7338 	    fptr->addr[3]);
7339 
7340 	kmem_free(bp, len);
7341 
7342 	ql_process_flt(ha, faddr);
7343 
7344 	QL_PRINT_9(ha, "done\n");
7345 }
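
/*
 * Both the FLT pointer and the flash layout table itself are validated
 * the same way: sum the region as 16-bit words (via CHAR_TO_SHORT())
 * and require the result to be zero, then check a fixed signature.  The
 * checksum step is restated in the small sketch below (hypothetical
 * helper).
 */
static int
ql_example_flt_checksum_ok(uint8_t *bp, uint32_t len)
{
	uint16_t	chksum = 0, w16;
	uint32_t	cnt;

	for (cnt = 0; cnt < len; cnt += 2) {
		w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
		chksum = (uint16_t)(chksum + w16);
	}

	/* A valid region sums to zero. */
	return (chksum == 0);
}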
7346 
7347 /*
7348  * ql_process_flt
7349  *	Obtains flash addresses from flash layout table
7350  *
7351  * Input:
7352  *	ha:	adapter state pointer.
7353  *	faddr:	flash layout table byte address.
7354  *
7355  * Context:
7356  *	Kernel context.
7357  */
7358 static void
7359 ql_process_flt(ql_adapter_state_t *ha, uint32_t faddr)
7360 {
7361 	ql_flt_hdr_t	*fhdr;
7362 	ql_flt_region_t	*frgn;
7363 	uint8_t		*bp, *eaddr, nv_rg, vpd_rg;
7364 	int		rval;
7365 	uint32_t	len, cnt, fe_addr;
7366 	uint16_t	chksum, w16;
7367 
7368 	QL_PRINT_9(ha, "started faddr=%xh\n", faddr);
7369 
7370 	/* Process flash layout table header */
7371 	if ((bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP)) == NULL) {
7372 		EL(ha, "kmem_zalloc=null\n");
7373 		return;
7374 	}
7375 	fhdr = (ql_flt_hdr_t *)bp;
7376 
7377 	/* Process flash layout table. */
7378 	if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
7379 	    QL_SUCCESS) {
7380 		EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
7381 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7382 		return;
7383 	}
7384 
7385 	/* Verify flash layout table. */
7386 	len = (uint32_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
7387 	    sizeof (ql_flt_hdr_t) + sizeof (ql_flt_region_t));
7388 	if (len > FLASH_LAYOUT_TABLE_SIZE) {
7389 		chksum = 0xffff;
7390 	} else {
7391 		for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
7392 			w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
7393 			chksum += w16;
7394 		}
7395 	}
7396 	w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
7397 	if (chksum != 0 || w16 != 1) {
7398 		EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
7399 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7400 		return;
7401 	}
7402 	eaddr = bp + len;
7403 
7404 	/* Process Function/Port Configuration Map. */
7405 	nv_rg = vpd_rg = 0;
7406 	if (CFG_IST(ha, CFG_CTRL_82XX)) {
7407 		uint16_t	i;
7408 		uint8_t		*mbp = eaddr;
7409 		ql_fp_cfg_map_t	*cmp = (ql_fp_cfg_map_t *)mbp;
7410 
7411 		len = (uint32_t)(CHAR_TO_SHORT(cmp->hdr.len[0],
7412 		    cmp->hdr.len[1]));
7413 		if (len > FLASH_LAYOUT_TABLE_SIZE) {
7414 			chksum = 0xffff;
7415 		} else {
7416 			for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
7417 				w16 = (uint16_t)CHAR_TO_SHORT(mbp[cnt],
7418 				    mbp[cnt + 1]);
7419 				chksum += w16;
7420 			}
7421 		}
7422 		w16 = CHAR_TO_SHORT(cmp->hdr.version[0], cmp->hdr.version[1]);
7423 		if (chksum != 0 || w16 != 1 ||
7424 		    cmp->hdr.Signature[0] != 'F' ||
7425 		    cmp->hdr.Signature[1] != 'P' ||
7426 		    cmp->hdr.Signature[2] != 'C' ||
7427 		    cmp->hdr.Signature[3] != 'M') {
7428 			EL(ha, "cfg_map chksum=%xh, version=%d, "
7429 			    "sig=%c%c%c%c \n", chksum, w16,
7430 			    cmp->hdr.Signature[0], cmp->hdr.Signature[1],
7431 			    cmp->hdr.Signature[2], cmp->hdr.Signature[3]);
7432 		} else {
7433 			cnt = (uint16_t)
7434 			    (CHAR_TO_SHORT(cmp->hdr.NumberEntries[0],
7435 			    cmp->hdr.NumberEntries[1]));
7436 			/* Locate entry for function. */
7437 			for (i = 0; i < cnt; i++) {
7438 				if (cmp->cfg[i].FunctionType == FT_FC &&
7439 				    cmp->cfg[i].FunctionNumber[0] ==
7440 				    ha->pci_function_number &&
7441 				    cmp->cfg[i].FunctionNumber[1] == 0) {
7442 					nv_rg = cmp->cfg[i].ConfigRegion;
7443 					vpd_rg = cmp->cfg[i].VpdRegion;
7444 					break;
7445 				}
7446 			}
7447 
7448 			if (nv_rg == 0 || vpd_rg == 0) {
7449 				EL(ha, "cfg_map nv_rg=%d, vpd_rg=%d\n", nv_rg,
7450 				    vpd_rg);
7451 				nv_rg = vpd_rg = 0;
7452 			}
7453 		}
7454 	}
7455 
7456 	/* Process flash layout table regions */
7457 	for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
7458 	    (uint8_t *)frgn < eaddr; frgn++) {
7459 		faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
7460 		    frgn->beg_addr[2], frgn->beg_addr[3]);
7461 		faddr >>= 2;
7462 		fe_addr = CHAR_TO_LONG(frgn->end_addr[0], frgn->end_addr[1],
7463 		    frgn->end_addr[2], frgn->end_addr[3]);
7464 		fe_addr >>= 2;
7465 
7466 		switch (frgn->region) {
7467 		case FLASH_8021_BOOTLOADER_REGION:
7468 			ha->bootloader_addr = faddr;
7469 			ha->bootloader_size = (fe_addr - faddr) + 1;
7470 			QL_PRINT_9(ha, "bootloader_addr=%xh, "
7471 			    "size=%xh\n", faddr,
7472 			    ha->bootloader_size);
7473 			break;
7474 		case FLASH_FW_REGION:
7475 		case FLASH_8021_FW_REGION:
7476 			ha->flash_fw_addr = faddr;
7477 			ha->flash_fw_size = (fe_addr - faddr) + 1;
7478 			QL_PRINT_9(ha, "flash_fw_addr=%xh, "
7479 			    "size=%xh\n", faddr,
7480 			    ha->flash_fw_size);
7481 			break;
7482 		case FLASH_GOLDEN_FW_REGION:
7483 		case FLASH_8021_GOLDEN_FW_REGION:
7484 			ha->flash_golden_fw_addr = faddr;
7485 			QL_PRINT_9(ha, "flash_golden_fw_addr=%xh\n",
7486 			    faddr);
7487 			break;
7488 		case FLASH_8021_VPD_REGION:
7489 			if (!vpd_rg || vpd_rg == FLASH_8021_VPD_REGION) {
7490 				ha->flash_vpd_addr = faddr;
7491 				QL_PRINT_9(ha, "8021_flash_vpd_"
7492 				    "addr=%xh\n", faddr);
7493 			}
7494 			break;
7495 		case FLASH_VPD_0_REGION:
7496 			if (vpd_rg) {
7497 				if (vpd_rg == FLASH_VPD_0_REGION) {
7498 					ha->flash_vpd_addr = faddr;
7499 					QL_PRINT_9(ha, "vpd_rg  "
7500 					    "flash_vpd_addr=%xh\n",
7501 					    faddr);
7502 				}
7503 			} else if (ha->function_number == 0 &&
7504 			    !(CFG_IST(ha, CFG_CTRL_82XX))) {
7505 				ha->flash_vpd_addr = faddr;
7506 				QL_PRINT_9(ha, "flash_vpd_addr=%xh"
7507 				    "\n", faddr);
7508 			}
7509 			break;
7510 		case FLASH_NVRAM_0_REGION:
7511 			if (nv_rg) {
7512 				if (nv_rg == FLASH_NVRAM_0_REGION) {
7513 					ADAPTER_STATE_LOCK(ha);
7514 					ha->function_number = 0;
7515 					ADAPTER_STATE_UNLOCK(ha);
7516 					ha->flash_nvram_addr = faddr;
7517 					QL_PRINT_9(ha, "nv_rg "
7518 					    "flash_nvram_addr=%xh\n",
7519 					    faddr);
7520 				}
7521 			} else if (ha->function_number == 0) {
7522 				ha->flash_nvram_addr = faddr;
7523 				QL_PRINT_9(ha, "flash_nvram_addr="
7524 				    "%xh\n", faddr);
7525 			}
7526 			break;
7527 		case FLASH_VPD_1_REGION:
7528 			if (vpd_rg) {
7529 				if (vpd_rg == FLASH_VPD_1_REGION) {
7530 					ha->flash_vpd_addr = faddr;
7531 					QL_PRINT_9(ha, "vpd_rg "
7532 					    "flash_vpd_addr=%xh\n",
7533 					    faddr);
7534 				}
7535 			} else if (ha->function_number &&
7536 			    !(CFG_IST(ha, CFG_CTRL_82XX))) {
7537 				ha->flash_vpd_addr = faddr;
7538 				QL_PRINT_9(ha, "flash_vpd_addr=%xh"
7539 				    "\n", faddr);
7540 			}
7541 			break;
7542 		case FLASH_NVRAM_1_REGION:
7543 			if (nv_rg) {
7544 				if (nv_rg == FLASH_NVRAM_1_REGION) {
7545 					ADAPTER_STATE_LOCK(ha);
7546 					ha->function_number = 1;
7547 					ADAPTER_STATE_UNLOCK(ha);
7548 					ha->flash_nvram_addr = faddr;
7549 					QL_PRINT_9(ha, "nv_rg "
7550 					    "flash_nvram_addr=%xh\n",
7551 					    faddr);
7552 				}
7553 			} else if (ha->function_number) {
7554 				ha->flash_nvram_addr = faddr;
7555 				QL_PRINT_9(ha, "flash_nvram_addr="
7556 				    "%xh\n", faddr);
7557 			}
7558 			break;
7559 		case FLASH_DESC_TABLE_REGION:
7560 			if (!(CFG_IST(ha, CFG_CTRL_82XX))) {
7561 				ha->flash_desc_addr = faddr;
7562 				QL_PRINT_9(ha, "flash_desc_addr="
7563 				    "%xh\n", faddr);
7564 			}
7565 			break;
7566 		case FLASH_ERROR_LOG_0_REGION:
7567 			if (ha->function_number == 0) {
7568 				ha->flash_errlog_start = faddr;
7569 				QL_PRINT_9(ha, "flash_errlog_addr="
7570 				    "%xh\n", faddr);
7571 			}
7572 			break;
7573 		case FLASH_ERROR_LOG_1_REGION:
7574 			if (ha->function_number) {
7575 				ha->flash_errlog_start = faddr;
7576 				QL_PRINT_9(ha, "flash_errlog_addr="
7577 				    "%xh\n", faddr);
7578 			}
7579 			break;
7580 		default:
7581 			break;
7582 		}
7583 	}
7584 	kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7585 
7586 	QL_PRINT_9(ha, "done\n");
7587 }
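
/*
 * Worked example with hypothetical numbers: FLT region entries carry
 * byte addresses, while the driver keeps 32-bit-word addresses, so a
 * region spanning bytes 0x00100000 through 0x0013ffff is recorded as
 *
 *	faddr   = 0x00100000 >> 2	= 0x00040000
 *	fe_addr = 0x0013ffff >> 2	= 0x0004ffff
 *	size    = (fe_addr - faddr) + 1	= 0x10000 words (256 KB)
 *
 * which is the form the flash_fw_addr/flash_fw_size and related
 * fields hold above.
 */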
7588 
7589 /*
7590  * ql_flash_nvram_defaults
7591  *	Flash default addresses.
7592  *
7593  * Input:
7594  *	ha:		adapter state pointer.
7595  *
7596  * Returns:
7597  *	None.
7598  *
7599  * Context:
7600  *	Kernel context.
7601  */
7602 static void
7603 ql_flash_nvram_defaults(ql_adapter_state_t *ha)
7604 {
7605 	QL_PRINT_10(ha, "started\n");
7606 
7607 	if (ha->function_number == 3) {
7608 		if (CFG_IST(ha, CFG_CTRL_27XX)) {
7609 			ha->flash_nvram_addr = NVRAM_2700_FUNC3_ADDR;
7610 			ha->flash_vpd_addr = VPD_2700_FUNC3_ADDR;
7611 			ha->ledstate.select = BEACON_2700_FUNC3_ADDR;
7612 			ha->flash_data_addr = FLASH_2700_DATA_ADDR;
7613 			ha->flash_desc_addr = FLASH_2700_DESCRIPTOR_TABLE;
7614 			ha->flash_fw_addr = FLASH_2700_FIRMWARE_ADDR;
7615 			ha->flash_fw_size = FLASH_2700_FIRMWARE_SIZE;
7616 			ha->boot_code_addr = FLASH_2700_BOOT_CODE_ADDR;
7617 		} else {
7618 			EL(ha, "unassigned flash fn%d addr: %x\n",
7619 			    ha->function_number, ha->device_id);
7620 		}
7621 	} else if (ha->function_number == 2) {
7622 		if (CFG_IST(ha, CFG_CTRL_27XX)) {
7623 			ha->flash_nvram_addr = NVRAM_2700_FUNC2_ADDR;
7624 			ha->flash_vpd_addr = VPD_2700_FUNC2_ADDR;
7625 			ha->ledstate.select = BEACON_2700_FUNC2_ADDR;
7626 			ha->flash_data_addr = FLASH_2700_DATA_ADDR;
7627 			ha->flash_desc_addr = FLASH_2700_DESCRIPTOR_TABLE;
7628 			ha->flash_fw_addr = FLASH_2700_FIRMWARE_ADDR;
7629 			ha->flash_fw_size = FLASH_2700_FIRMWARE_SIZE;
7630 			ha->boot_code_addr = FLASH_2700_BOOT_CODE_ADDR;
7631 		} else {
7632 			EL(ha, "unassigned flash fn%d addr: %x\n",
7633 			    ha->function_number, ha->device_id);
7634 		}
7635 	} else if (ha->function_number == 1) {
7636 		if (CFG_IST(ha, CFG_CTRL_23XX) ||
7637 		    (CFG_IST(ha, CFG_CTRL_63XX))) {
7638 			ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
7639 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
7640 			ha->boot_code_addr = FLASH_2300_BOOT_CODE_ADDR;
7641 		} else if (CFG_IST(ha, CFG_CTRL_24XX)) {
7642 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7643 			ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
7644 			ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
7645 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
7646 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7647 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7648 			ha->boot_code_addr = FLASH_2400_BOOT_CODE_ADDR;
7649 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7650 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7651 			ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
7652 			ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
7653 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
7654 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7655 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7656 			ha->boot_code_addr = FLASH_2500_BOOT_CODE_ADDR;
7657 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7658 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7659 			ha->flash_nvram_addr = NVRAM_8100_FUNC1_ADDR;
7660 			ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
7661 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
7662 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7663 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7664 			ha->boot_code_addr = FLASH_8100_BOOT_CODE_ADDR;
7665 		} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
7666 			ha->flash_data_addr = 0;
7667 			ha->flash_nvram_addr = NVRAM_8021_FUNC1_ADDR;
7668 			ha->flash_vpd_addr = VPD_8021_FUNC1_ADDR;
7669 			ha->flash_errlog_start = 0;
7670 			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
7671 			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
7672 			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
7673 			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
7674 			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
7675 			ha->boot_code_addr = FLASH_8021_BOOT_CODE_ADDR;
7676 		} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
7677 			ha->flash_nvram_addr = NVRAM_8300_FC_FUNC1_ADDR;
7678 			ha->flash_vpd_addr = VPD_8300_FC_FUNC1_ADDR;
7679 			ha->ledstate.select = BEACON_8300_FC_FUNC1_ADDR;
7680 			ha->flash_errlog_start = FLASH_8300_ERRLOG_START_ADDR_1;
7681 			ha->flash_data_addr = FLASH_8300_DATA_ADDR;
7682 			ha->flash_desc_addr = FLASH_8300_DESCRIPTOR_TABLE;
7683 			ha->flash_fw_addr = FLASH_8300_FC_FIRMWARE_ADDR;
7684 			ha->flash_fw_size = FLASH_8300_FIRMWARE_SIZE;
7685 			ha->bootloader_addr = FLASH_8300_BOOTLOADER_ADDR;
7686 			ha->bootloader_size = FLASH_8300_BOOTLOADER_SIZE;
7687 			ha->boot_code_addr = FLASH_8300_BOOT_CODE_ADDR;
7688 		} else if (CFG_IST(ha, CFG_CTRL_27XX)) {
7689 			ha->flash_nvram_addr = NVRAM_2700_FUNC1_ADDR;
7690 			ha->flash_vpd_addr = VPD_2700_FUNC1_ADDR;
7691 			ha->ledstate.select = BEACON_2700_FUNC1_ADDR;
7692 			ha->flash_data_addr = FLASH_2700_DATA_ADDR;
7693 			ha->flash_desc_addr = FLASH_2700_DESCRIPTOR_TABLE;
7694 			ha->flash_fw_addr = FLASH_2700_FIRMWARE_ADDR;
7695 			ha->flash_fw_size = FLASH_2700_FIRMWARE_SIZE;
7696 			ha->boot_code_addr = FLASH_2700_BOOT_CODE_ADDR;
7697 		} else {
7698 			EL(ha, "unassigned flash fn%d addr: %x\n",
7699 			    ha->function_number, ha->device_id);
7700 		}
7701 	} else if (ha->function_number == 0) {
7702 		if (CFG_IST(ha, CFG_CTRL_22XX)) {
7703 			ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
7704 			ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
7705 			ha->boot_code_addr = FLASH_2200_BOOT_CODE_ADDR;
7706 		} else if (CFG_IST(ha, CFG_CTRL_23XX) ||
7707 		    (CFG_IST(ha, CFG_CTRL_63XX))) {
7708 			ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
7709 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
7710 			ha->boot_code_addr = FLASH_2300_BOOT_CODE_ADDR;
7711 		} else if (CFG_IST(ha, CFG_CTRL_24XX)) {
7712 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7713 			ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
7714 			ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
7715 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
7716 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7717 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7718 			ha->boot_code_addr = FLASH_2400_BOOT_CODE_ADDR;
7719 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7720 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7721 			ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
7722 			ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
7723 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
7724 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7725 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7726 			ha->boot_code_addr = FLASH_2500_BOOT_CODE_ADDR;
7727 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7728 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7729 			ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
7730 			ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
7731 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
7732 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7733 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7734 			ha->boot_code_addr = FLASH_8100_BOOT_CODE_ADDR;
7735 		} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
7736 			ha->flash_data_addr = 0;
7737 			ha->flash_nvram_addr = NVRAM_8021_FUNC0_ADDR;
7738 			ha->flash_vpd_addr = VPD_8021_FUNC0_ADDR;
7739 			ha->flash_errlog_start = 0;
7740 			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
7741 			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
7742 			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
7743 			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
7744 			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
7745 			ha->boot_code_addr = FLASH_8021_BOOT_CODE_ADDR;
7746 		} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
7747 			ha->flash_nvram_addr = NVRAM_8300_FC_FUNC0_ADDR;
7748 			ha->flash_vpd_addr = VPD_8300_FC_FUNC0_ADDR;
7749 			ha->ledstate.select = BEACON_8300_FCOE_FUNC0_ADDR;
7750 			ha->flash_errlog_start = FLASH_8300_ERRLOG_START_ADDR_0;
7751 			ha->flash_data_addr = FLASH_8300_DATA_ADDR;
7752 			ha->flash_desc_addr = FLASH_8300_DESCRIPTOR_TABLE;
7753 			ha->flash_fw_addr = FLASH_8300_FC_FIRMWARE_ADDR;
7754 			ha->flash_fw_size = FLASH_8300_FIRMWARE_SIZE;
7755 			ha->bootloader_addr = FLASH_8300_BOOTLOADER_ADDR;
7756 			ha->bootloader_size = FLASH_8300_BOOTLOADER_SIZE;
7757 			ha->boot_code_addr = FLASH_8300_BOOT_CODE_ADDR;
7758 		} else if (CFG_IST(ha, CFG_CTRL_27XX)) {
7759 			ha->flash_nvram_addr = NVRAM_2700_FUNC0_ADDR;
7760 			ha->flash_vpd_addr = VPD_2700_FUNC0_ADDR;
7761 			ha->ledstate.select = BEACON_2700_FUNC0_ADDR;
7762 			ha->flash_data_addr = FLASH_2700_DATA_ADDR;
7763 			ha->flash_desc_addr = FLASH_2700_DESCRIPTOR_TABLE;
7764 			ha->flash_fw_addr = FLASH_2700_FIRMWARE_ADDR;
7765 			ha->flash_fw_size = FLASH_2700_FIRMWARE_SIZE;
7766 			ha->boot_code_addr = FLASH_2700_BOOT_CODE_ADDR;
7767 		} else {
7768 			EL(ha, "unassigned flash fn%d addr: %x\n",
7769 			    ha->function_number, ha->device_id);
7770 		}
7771 	} else {
7772 		EL(ha, "unknown function=%d, device_id=%x\n",
7773 		    ha->function_number, ha->device_id);
7774 	}
7775 	QL_PRINT_10(ha, "done\n");
7776 }
7777 
7778 /*
7779  * ql_get_sfp
7780  *	Returns SFP data to the SDM API caller.
7781  *
7782  * Input:
7783  *	ha:	adapter state pointer.
7784  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7785  *	mode:	flags.
7786  *
7787  * Returns:
7788  *	None, request status indicated in cmd->Status.
7789  *
7790  * Context:
7791  *	Kernel context.
7792  */
7793 static void
7794 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7795 {
7796 	QL_PRINT_9(ha, "started\n");
7797 
7798 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
7799 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7800 		EL(ha, "failed, invalid request for HBA\n");
7801 		return;
7802 	}
7803 
7804 	if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
7805 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7806 		cmd->DetailStatus = QL_24XX_SFP_SIZE;
7807 		EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
7808 		    cmd->ResponseLen);
7809 		return;
7810 	}
7811 
7812 	/* Dump SFP data in user buffer */
7813 	if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7814 	    mode)) != 0) {
7815 		cmd->Status = EXT_STATUS_COPY_ERR;
7816 		EL(ha, "failed, copy error\n");
7817 	} else {
7818 		cmd->Status = EXT_STATUS_OK;
7819 	}
7820 
7821 	QL_PRINT_9(ha, "done\n");
7822 }
7823 
7824 /*
7825  * ql_dump_sfp
7826  *	Dumps SFP.
7827  *
7828  * Input:
7829  *	ha:	adapter state pointer.
7830  *	bp:	buffer address.
7831  *	mode:	flags.
7832  *
7833  * Returns:
7834  *	0 = success, else errno value (ENOMEM or EFAULT).
7835  * Context:
7836  *	Kernel context.
7837  */
7838 static int
7839 ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
7840 {
7841 	dma_mem_t	mem;
7842 	uint32_t	cnt;
7843 	int		rval2, rval = 0;
7844 	uint32_t	dxfer;
7845 
7846 	QL_PRINT_9(ha, "started\n");
7847 
7848 	/* Get memory for SFP. */
7849 
7850 	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
7851 	    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
7852 		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
7853 		return (ENOMEM);
7854 	}
7855 
7856 	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
7857 		rval2 = ql_read_sfp(ha, &mem,
7858 		    (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
7859 		    (uint16_t)(cnt & 0xff));
7860 		if (rval2 != QL_SUCCESS) {
7861 			EL(ha, "failed, read_sfp=%xh\n", rval2);
7862 			rval = EFAULT;
7863 			break;
7864 		}
7865 
7866 		/* copy the data back */
7867 		if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
7868 		    mode)) != mem.size) {
7869 			/* ddi copy error */
7870 			EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
7871 			rval = EFAULT;
7872 			break;
7873 		}
7874 
7875 		/* adjust the buffer pointer */
7876 		bp = (caddr_t)bp + mem.size;
7877 	}
7878 
7879 	ql_free_phys(ha, &mem);
7880 
7881 	QL_PRINT_9(ha, "done\n");
7882 
7883 	return (rval);
7884 }
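
/*
 * Illustrative sketch, not driver code: the loop above walks the
 * transceiver in mem.size-byte chunks.  Offsets below 256 are read
 * through two-wire device address 0xA0 (the SFP serial ID page) and
 * the rest through 0xA2 (the diagnostics page), with the low byte of
 * the running count used as the in-page offset.  For a hypothetical
 * chunk starting at cnt = 0x140:
 *
 *	uint16_t dev = (uint16_t)(cnt < 256 ? 0xA0 : 0xA2);
 *	uint16_t off = (uint16_t)(cnt & 0xff);
 *
 * which yields dev = 0xA2 and off = 0x40.
 */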
7885 
7886 /*
7887  * ql_port_param
7888  *	Retrieves or sets the firmware port speed settings
7889  *
7890  * Input:
7891  *	ha:	adapter state pointer.
7892  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7893  *	mode:	flags.
7894  *
7895  * Returns:
7896  *	None, request status indicated in cmd->Status.
7897  *
7898  * Context:
7899  *	Kernel context.
7900  *
7901  */
7902 static void
7903 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7904 {
7905 	uint8_t			*name;
7906 	ql_tgt_t		*tq;
7907 	EXT_PORT_PARAM		port_param = {0};
7908 	uint32_t		rval = QL_SUCCESS;
7909 	uint32_t		idma_rate;
7910 
7911 	QL_PRINT_9(ha, "started\n");
7912 
7913 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
7914 		EL(ha, "invalid request for this HBA\n");
7915 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7916 		cmd->ResponseLen = 0;
7917 		return;
7918 	}
7919 
7920 	if (LOOP_NOT_READY(ha)) {
7921 		EL(ha, "failed, loop not ready\n");
7922 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
7923 		cmd->ResponseLen = 0;
7924 		return;
7925 	}
7926 
7927 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7928 	    (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
7929 		EL(ha, "failed, ddi_copyin\n");
7930 		cmd->Status = EXT_STATUS_COPY_ERR;
7931 		cmd->ResponseLen = 0;
7932 		return;
7933 	}
7934 
7935 	if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
7936 		EL(ha, "Unsupported dest lookup type: %xh\n",
7937 		    port_param.FCScsiAddr.DestType);
7938 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7939 		cmd->ResponseLen = 0;
7940 		return;
7941 	}
7942 
7943 	name = port_param.FCScsiAddr.DestAddr.WWPN;
7944 
7945 	QL_PRINT_9(ha, "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
7946 	    name[0], name[1], name[2], name[3], name[4],
7947 	    name[5], name[6], name[7]);
7948 
7949 	tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
7950 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id) ||
7951 	    tq->d_id.b24 == FS_MANAGEMENT_SERVER) {
7952 		EL(ha, "failed, fc_port not found\n");
7953 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7954 		cmd->ResponseLen = 0;
7955 		return;
7956 	}
7957 
7958 	cmd->Status = EXT_STATUS_OK;
7959 	cmd->DetailStatus = EXT_STATUS_OK;
7960 
7961 	switch (port_param.Mode) {
7962 	case EXT_IIDMA_MODE_GET:
7963 		/*
7964 		 * Report the firmware's port rate for the wwpn
7965 		 */
7966 		rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7967 		    port_param.Mode);
7968 
7969 		if (rval != QL_SUCCESS) {
7970 			EL(ha, "iidma get failed: %xh\n", rval);
7971 			cmd->Status = EXT_STATUS_MAILBOX;
7972 			cmd->DetailStatus = rval;
7973 			cmd->ResponseLen = 0;
7974 		} else {
7975 			switch (idma_rate) {
7976 			case IIDMA_RATE_1GB:
7977 				port_param.Speed =
7978 				    EXT_DEF_PORTSPEED_1GBIT;
7979 				break;
7980 			case IIDMA_RATE_2GB:
7981 				port_param.Speed =
7982 				    EXT_DEF_PORTSPEED_2GBIT;
7983 				break;
7984 			case IIDMA_RATE_4GB:
7985 				port_param.Speed =
7986 				    EXT_DEF_PORTSPEED_4GBIT;
7987 				break;
7988 			case IIDMA_RATE_8GB:
7989 				port_param.Speed =
7990 				    EXT_DEF_PORTSPEED_8GBIT;
7991 				break;
7992 			case IIDMA_RATE_10GB:
7993 				port_param.Speed =
7994 				    EXT_DEF_PORTSPEED_10GBIT;
7995 				break;
7996 			case IIDMA_RATE_16GB:
7997 				port_param.Speed =
7998 				    EXT_DEF_PORTSPEED_16GBIT;
7999 				break;
8000 			case IIDMA_RATE_32GB:
8001 				port_param.Speed =
8002 				    EXT_DEF_PORTSPEED_32GBIT;
8003 				break;
8004 			default:
8005 				port_param.Speed =
8006 				    EXT_DEF_PORTSPEED_UNKNOWN;
8007 				EL(ha, "failed, Port speed rate=%xh\n",
8008 				    idma_rate);
8009 				break;
8010 			}
8011 
8012 			/* Copy back the data */
8013 			rval = ddi_copyout((void *)&port_param,
8014 			    (void *)(uintptr_t)cmd->ResponseAdr,
8015 			    sizeof (EXT_PORT_PARAM), mode);
8016 
8017 			if (rval != 0) {
8018 				cmd->Status = EXT_STATUS_COPY_ERR;
8019 				cmd->ResponseLen = 0;
8020 				EL(ha, "failed, ddi_copyout\n");
8021 			} else {
8022 				cmd->ResponseLen = (uint32_t)
8023 				    sizeof (EXT_PORT_PARAM);
8024 			}
8025 		}
8026 		break;
8027 
8028 	case EXT_IIDMA_MODE_SET:
8029 		/*
8030 		 * Set the firmware's port rate for the wwpn
8031 		 */
8032 		switch (port_param.Speed) {
8033 		case EXT_DEF_PORTSPEED_1GBIT:
8034 			idma_rate = IIDMA_RATE_1GB;
8035 			break;
8036 		case EXT_DEF_PORTSPEED_2GBIT:
8037 			idma_rate = IIDMA_RATE_2GB;
8038 			break;
8039 		case EXT_DEF_PORTSPEED_4GBIT:
8040 			idma_rate = IIDMA_RATE_4GB;
8041 			break;
8042 		case EXT_DEF_PORTSPEED_8GBIT:
8043 			idma_rate = IIDMA_RATE_8GB;
8044 			break;
8045 		case EXT_DEF_PORTSPEED_10GBIT:
8046 			idma_rate = IIDMA_RATE_10GB;
8047 			break;
8048 		case EXT_DEF_PORTSPEED_16GBIT:
8049 			idma_rate = IIDMA_RATE_16GB;
8050 			break;
8051 		case EXT_DEF_PORTSPEED_32GBIT:
8052 			idma_rate = IIDMA_RATE_32GB;
8053 			break;
8054 		default:
8055 			EL(ha, "invalid set iidma rate: %x\n",
8056 			    port_param.Speed);
8057 			cmd->Status = EXT_STATUS_INVALID_PARAM;
8058 			cmd->ResponseLen = 0;
8059 			rval = QL_PARAMETER_ERROR;
8060 			break;
8061 		}
8062 
8063 		if (rval == QL_SUCCESS) {
8064 			rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
8065 			    port_param.Mode);
8066 			if (rval != QL_SUCCESS) {
8067 				EL(ha, "iidma set failed: %xh\n", rval);
8068 				cmd->Status = EXT_STATUS_MAILBOX;
8069 				cmd->DetailStatus = rval;
8070 				cmd->ResponseLen = 0;
8071 			}
8072 		}
8073 		break;
8074 	default:
8075 		EL(ha, "invalid mode specified: %x\n", port_param.Mode);
8076 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8077 		cmd->ResponseLen = 0;
8078 		cmd->DetailStatus = 0;
8079 		break;
8080 	}
8081 
8082 	QL_PRINT_9(ha, "done\n");
8083 }
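
/*
 * Illustrative sketch, not driver code: the GET and SET switches
 * above translate one-for-one between the SDM API port speed
 * constants and the firmware iIDMA rate codes.  A table-driven form
 * of the same mapping (speed_map is a hypothetical name) would be:
 *
 *	static const struct {
 *		uint32_t	ext_speed;
 *		uint32_t	idma_rate;
 *	} speed_map[] = {
 *		{ EXT_DEF_PORTSPEED_1GBIT,	IIDMA_RATE_1GB },
 *		{ EXT_DEF_PORTSPEED_2GBIT,	IIDMA_RATE_2GB },
 *		{ EXT_DEF_PORTSPEED_4GBIT,	IIDMA_RATE_4GB },
 *		{ EXT_DEF_PORTSPEED_8GBIT,	IIDMA_RATE_8GB },
 *		{ EXT_DEF_PORTSPEED_10GBIT,	IIDMA_RATE_10GB },
 *		{ EXT_DEF_PORTSPEED_16GBIT,	IIDMA_RATE_16GB },
 *		{ EXT_DEF_PORTSPEED_32GBIT,	IIDMA_RATE_32GB },
 *	};
 */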
8084 
8085 /*
8086  * ql_get_fwexttrace
8087  *	Dumps f/w extended trace buffer
8088  *
8089  * Input:
8090  *	ha:	adapter state pointer.
8091  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8092  *	mode:	flags.
8093  *
8094  * Returns:
8095  *	None, request status indicated in cmd->Status.
8096  * Context:
8097  *	Kernel context.
8098  */
8099 /* ARGSUSED */
8100 static void
8101 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8102 {
8103 	int	rval;
8104 	caddr_t	payload;
8105 
8106 	QL_PRINT_9(ha, "started\n");
8107 
8108 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
8109 		EL(ha, "invalid request for this HBA\n");
8110 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8111 		cmd->ResponseLen = 0;
8112 		return;
8113 	}
8114 
8115 	if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
8116 	    (ha->fwexttracebuf.bp == NULL)) {
8117 		EL(ha, "f/w extended trace is not enabled\n");
8118 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8119 		cmd->ResponseLen = 0;
8120 		return;
8121 	}
8122 
8123 	if (cmd->ResponseLen < FWEXTSIZE) {
8124 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8125 		cmd->DetailStatus = FWEXTSIZE;
8126 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
8127 		    cmd->ResponseLen, FWEXTSIZE);
8128 		cmd->ResponseLen = 0;
8129 		return;
8130 	}
8131 
8132 	/* Time Stamp */
8133 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP,
8134 	    NULL);
8135 	if (rval != QL_SUCCESS) {
8136 		EL(ha, "f/w extended trace insert "
8137 		    "time stamp failed: %xh\n", rval);
8138 		cmd->Status = EXT_STATUS_ERR;
8139 		cmd->ResponseLen = 0;
8140 		return;
8141 	}
8142 
8143 	/* Disable Tracing */
8144 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE,
8145 	    NULL);
8146 	if (rval != QL_SUCCESS) {
8147 		EL(ha, "f/w extended trace disable failed: %xh\n", rval);
8148 		cmd->Status = EXT_STATUS_ERR;
8149 		cmd->ResponseLen = 0;
8150 		return;
8151 	}
8152 
8153 	/* Allocate payload buffer */
8154 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
8155 	if (payload == NULL) {
8156 		EL(ha, "failed, kmem_zalloc\n");
8157 		cmd->Status = EXT_STATUS_NO_MEMORY;
8158 		cmd->ResponseLen = 0;
8159 		return;
8160 	}
8161 
8162 	/* Sync DMA buffer. */
8163 	(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
8164 	    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
8165 
8166 	/* Copy trace buffer data. */
8167 	ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
8168 	    (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
8169 	    DDI_DEV_AUTOINCR);
8170 
8171 	/* Send payload to application. */
8172 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
8173 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
8174 		EL(ha, "failed, send_buffer_data\n");
8175 		cmd->Status = EXT_STATUS_COPY_ERR;
8176 		cmd->ResponseLen = 0;
8177 	} else {
8178 		cmd->Status = EXT_STATUS_OK;
8179 	}
8180 
8181 	kmem_free(payload, FWEXTSIZE);
8182 
8183 	QL_PRINT_9(ha, "done\n");
8184 }
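
/*
 * Illustrative caller-side sketch with hypothetical variables: when
 * the response buffer is smaller than FWEXTSIZE the handler above
 * fails with EXT_STATUS_BUFFER_TOO_SMALL and reports the required
 * size in DetailStatus, so an SDM API consumer can resize and retry:
 *
 *	if (cmd.Status == EXT_STATUS_BUFFER_TOO_SMALL) {
 *		needed = cmd.DetailStatus;
 *		(grow the response buffer to "needed" and reissue)
 *	}
 */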
8185 
8186 /*
8187  * ql_get_fwfcetrace
8188  *	Dumps f/w fibre channel event trace buffer
8189  *
8190  * Input:
8191  *	ha:	adapter state pointer.
8192  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8193  *	mode:	flags.
8194  *
8195  * Returns:
8196  *	None, request status indicated in cmd->Status.
8197  * Context:
8198  *	Kernel context.
8199  */
8200 /* ARGSUSED */
8201 static void
8202 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8203 {
8204 	int			rval;
8205 	caddr_t			fce_trace_p;
8206 	ql_mbx_data_t		mr;
8207 	EXT_FW_FCE_TRACE	*fce_trace;
8208 	size_t			cnt;
8209 	uint32_t		*bp;
8210 
8211 	QL_PRINT_9(ha, "started\n");
8212 
8213 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
8214 		EL(ha, "invalid request for this HBA\n");
8215 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8216 		cmd->ResponseLen = 0;
8217 		return;
8218 	}
8219 
8220 	if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
8221 	    (ha->fwfcetracebuf.bp == NULL)) {
8222 		EL(ha, "f/w FCE trace is not enabled\n");
8223 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8224 		cmd->ResponseLen = 0;
8225 		return;
8226 	}
8227 
8228 	if (cmd->ResponseLen < FWFCESIZE) {
8229 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8230 		cmd->DetailStatus = FWFCESIZE;
8231 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
8232 		    cmd->ResponseLen, FWFCESIZE);
8233 		cmd->ResponseLen = 0;
8234 		return;
8235 	}
8236 
8237 	/* Disable Tracing */
8238 	rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE, &mr);
8239 	if (rval != QL_SUCCESS) {
8240 		EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
8241 		cmd->Status = EXT_STATUS_ERR;
8242 		cmd->ResponseLen = 0;
8243 		return;
8244 	}
8245 
8246 	/* Allocate payload buffer */
8247 	fce_trace = kmem_zalloc(FWFCESIZE, KM_SLEEP);
8248 	if (fce_trace == NULL) {
8249 		EL(ha, "failed, kmem_zalloc\n");
8250 		cmd->Status = EXT_STATUS_NO_MEMORY;
8251 		cmd->ResponseLen = 0;
8252 		return;
8253 	}
8254 	fce_trace_p = (caddr_t)&fce_trace->TraceData[0];
8255 
8256 	/* Copy In Pointer and Base Pointer values */
8257 	fce_trace->Registers[0] = mr.mb[2];
8258 	fce_trace->Registers[1] = mr.mb[3];
8259 	fce_trace->Registers[2] = mr.mb[4];
8260 	fce_trace->Registers[3] = mr.mb[5];
8261 
8262 	fce_trace->Registers[4] = LSW(ha->fwexttracebuf.cookies->dmac_address);
8263 	fce_trace->Registers[5] = MSW(ha->fwexttracebuf.cookies->dmac_address);
8264 	fce_trace->Registers[6] = LSW(ha->fwexttracebuf.cookies->dmac_notused);
8265 	fce_trace->Registers[7] = MSW(ha->fwexttracebuf.cookies->dmac_notused);
8266 
8267 	/* Copy FCE Trace Enable Registers */
8268 	fce_trace->Registers[8] = ha->fw_fce_trace_enable.mb[0];
8269 	fce_trace->Registers[9] = ha->fw_fce_trace_enable.mb[2];
8270 	fce_trace->Registers[10] = ha->fw_fce_trace_enable.mb[3];
8271 	fce_trace->Registers[11] = ha->fw_fce_trace_enable.mb[4];
8272 	fce_trace->Registers[12] = ha->fw_fce_trace_enable.mb[5];
8273 	fce_trace->Registers[13] = ha->fw_fce_trace_enable.mb[6];
8274 
8275 	/* Sync DMA buffer. */
8276 	(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
8277 	    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
8278 
8279 	/* Copy trace buffer data. */
8280 	ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)fce_trace_p,
8281 	    (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
8282 	    DDI_DEV_AUTOINCR);
8283 
8284 	/* Swap bytes in buffer in case of Big Endian */
8285 	bp = (uint32_t *)&fce_trace->TraceData[0];
8286 	for (cnt = 0; cnt < (FWFCESIZE / sizeof (uint32_t)); cnt++) {
8287 		LITTLE_ENDIAN_32(bp);
8288 		bp++;
8289 	}
8290 
8291 	/* Send payload to application. */
8292 	if (ql_send_buffer_data((caddr_t)fce_trace,
8293 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
8294 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
8295 		EL(ha, "failed, send_buffer_data\n");
8296 		cmd->Status = EXT_STATUS_COPY_ERR;
8297 		cmd->ResponseLen = 0;
8298 	} else {
8299 		cmd->Status = EXT_STATUS_OK;
8300 	}
8301 
8302 	/* Re-enable Tracing */
8303 	bzero(ha->fwfcetracebuf.bp, ha->fwfcetracebuf.size);
8304 	if ((rval = ql_fw_etrace(ha, &ha->fwfcetracebuf,
8305 	    FTO_FCE_TRACE_ENABLE, &mr)) != QL_SUCCESS) {
8306 		EL(ha, "fcetrace enable failed: %xh\n", rval);
8307 	} else {
8308 		ha->fw_fce_trace_enable = mr;
8309 		EL(ha, "FCE Trace Re-Enabled\n");
8310 	}
8311 
8312 	kmem_free(fce_trace, FWFCESIZE);
8313 
8314 	QL_PRINT_9(ha, "done\n");
8315 }
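
/*
 * Illustrative sketch, not driver code: the FCE trace data is
 * little-endian in DMA memory, so on a big-endian host every 32-bit
 * word must be byte swapped before it reaches the application.  The
 * loop above applies LITTLE_ENDIAN_32() in place; the same idiom for
 * an arbitrary buffer of nwords 32-bit words (buf and nwords are
 * hypothetical names) is:
 *
 *	uint32_t	*wp = buf;
 *	size_t		i;
 *
 *	for (i = 0; i < nwords; i++) {
 *		LITTLE_ENDIAN_32(wp);
 *		wp++;
 *	}
 */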
8316 
8317 /*
8318  * ql_get_pci_data
8319  *	Retrieves pci config space data
8320  *
8321  * Input:
8322  *	ha:	adapter state pointer.
8323  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8324  *	mode:	flags.
8325  *
8326  * Returns:
8327  *	None, request status indicated in cmd->Status.
8328  *
8329  * Context:
8330  *	Kernel context.
8331  *
8332  */
8333 static void
8334 ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8335 {
8336 	uint8_t		cap_ptr;
8337 	uint8_t		cap_id;
8338 	uint32_t	buf_size = 256;
8339 
8340 	QL_PRINT_9(ha, "started\n");
8341 
8342 	/*
8343 	 * First check the "Capabilities List" bit of the status register.
8344 	 */
8345 	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
8346 		/*
8347 		 * Now get the capability pointer
8348 		 */
8349 		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
8350 		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
8351 			/*
8352 			 * Check for the pcie capability.
8353 			 */
8354 			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
8355 			if (cap_id == PCI_CAP_ID_PCI_E) {
8356 				buf_size = 4096;
8357 				break;
8358 			}
8359 			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
8360 			    (cap_ptr + PCI_CAP_NEXT_PTR));
8361 		}
8362 	}
8363 
8364 	if (cmd->ResponseLen < buf_size) {
8365 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8366 		cmd->DetailStatus = buf_size;
8367 		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
8368 		    cmd->ResponseLen);
8369 		return;
8370 	}
8371 
8372 	/* Dump PCI config data. */
8373 	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
8374 	    buf_size, mode)) != 0) {
8375 		cmd->Status = EXT_STATUS_COPY_ERR;
8376 		cmd->DetailStatus = 0;
8377 		EL(ha, "failed, copy err pci_dump\n");
8378 	} else {
8379 		cmd->Status = EXT_STATUS_OK;
8380 		cmd->DetailStatus = buf_size;
8381 	}
8382 
8383 	QL_PRINT_9(ha, "done\n");
8384 }
8385 
8386 /*
8387  * ql_pci_dump
8388  *	Dumps PCI config data to application buffer.
8389  *
8390  * Input:
8391  *	ha:	adapter state pointer.
8392  *	bp:	user buffer address.
8393  *
8394  * Returns:
8395  *	0 = success, else errno value (ENOMEM or EFAULT).
8396  * Context:
8397  *	Kernel context.
8398  */
8399 int
8400 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
8401 {
8402 	uint32_t	pci_os;
8403 	uint32_t	*ptr32, *org_ptr32;
8404 
8405 	QL_PRINT_9(ha, "started\n");
8406 
8407 	ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
8408 	if (ptr32 == NULL) {
8409 		EL(ha, "failed kmem_zalloc\n");
8410 		return (ENOMEM);
8411 	}
8412 
8413 	/* store the initial value of ptr32 */
8414 	org_ptr32 = ptr32;
8415 	for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
8416 		*ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
8417 		LITTLE_ENDIAN_32(ptr32);
8418 		ptr32++;
8419 	}
8420 
8421 	if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
8422 	    0) {
8423 		EL(ha, "failed ddi_copyout\n");
8424 		kmem_free(org_ptr32, pci_size);
8425 		return (EFAULT);
8426 	}
8427 
8428 	QL_DUMP_9(org_ptr32, 8, pci_size);
8429 
8430 	kmem_free(org_ptr32, pci_size);
8431 
8432 	QL_PRINT_9(ha, "done\n");
8433 
8434 	return (0);
8435 }
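
/*
 * Illustrative usage sketch: ql_pci_dump() snapshots pci_size bytes
 * of configuration space in 32-bit reads, normalizes each word to
 * little-endian and copies the result to the caller's buffer, so a
 * hypothetical dump of the conventional 256-byte header would be:
 *
 *	uint32_t	*ubuf = (uint32_t *)(uintptr_t)cmd->ResponseAdr;
 *
 *	if (ql_pci_dump(ha, ubuf, 256, mode) != 0) {
 *		cmd->Status = EXT_STATUS_COPY_ERR;
 *	}
 */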
8436 
8437 /*
8438  * ql_menlo_reset
8439  *	Reset Menlo
8440  *
8441  * Input:
8442  *	ha:	adapter state pointer.
8443  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8444  *	mode:	flags.
8445  *
8446  * Returns:
8447  *	None, request status indicated in cmd->Status.
8448  * Context:
8449  *	Kernel context.
8450  */
8451 static void
8452 ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8453 {
8454 	EXT_MENLO_RESET	rst;
8455 	ql_mbx_data_t	mr;
8456 	int		rval;
8457 
8458 	QL_PRINT_9(ha, "started\n");
8459 
8460 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
8461 		EL(ha, "failed, invalid request for HBA\n");
8462 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8463 		cmd->ResponseLen = 0;
8464 		return;
8465 	}
8466 
8467 	/*
8468 	 * TODO: only vp_index 0 can do this (?)
8469 	 */
8470 
8471 	/*  Verify the size of request structure. */
8472 	if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
8473 		/* Return error */
8474 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8475 		    sizeof (EXT_MENLO_RESET));
8476 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8477 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8478 		cmd->ResponseLen = 0;
8479 		return;
8480 	}
8481 
8482 	/* Get reset request. */
8483 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
8484 	    (void *)&rst, sizeof (EXT_MENLO_RESET), mode) != 0) {
8485 		EL(ha, "failed, ddi_copyin\n");
8486 		cmd->Status = EXT_STATUS_COPY_ERR;
8487 		cmd->ResponseLen = 0;
8488 		return;
8489 	}
8490 
8491 	/* Wait for I/O to stop and daemon to stall. */
8492 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8493 		EL(ha, "ql_stall_driver failed\n");
8494 		ql_restart_hba(ha);
8495 		cmd->Status = EXT_STATUS_BUSY;
8496 		cmd->ResponseLen = 0;
8497 		return;
8498 	}
8499 
8500 	rval = ql_reset_menlo(ha, &mr, rst.Flags);
8501 	if (rval != QL_SUCCESS) {
8502 		EL(ha, "failed, status=%xh\n", rval);
8503 		cmd->Status = EXT_STATUS_MAILBOX;
8504 		cmd->DetailStatus = rval;
8505 		cmd->ResponseLen = 0;
8506 	} else if (mr.mb[1] != 0) {
8507 		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
8508 		cmd->Status = EXT_STATUS_ERR;
8509 		cmd->DetailStatus = mr.mb[1];
8510 		cmd->ResponseLen = 0;
8511 	}
8512 
8513 	ql_restart_hba(ha);
8514 
8515 	QL_PRINT_9(ha, "done\n");
8516 }
8517 
8518 /*
8519  * ql_menlo_get_fw_version
8520  *	Get Menlo firmware version.
8521  *
8522  * Input:
8523  *	ha:	adapter state pointer.
8524  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8525  *	mode:	flags.
8526  *
8527  * Returns:
8528  *	None, request status indicated in cmd->Status.
8529  * Context:
8530  *	Kernel context.
8531  */
8532 static void
8533 ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8534 {
8535 	int				rval;
8536 	ql_mbx_iocb_t			*pkt;
8537 	EXT_MENLO_GET_FW_VERSION	ver = {0};
8538 
8539 	QL_PRINT_9(ha, "started\n");
8540 
8541 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
8542 		EL(ha, "failed, invalid request for HBA\n");
8543 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8544 		cmd->ResponseLen = 0;
8545 		return;
8546 	}
8547 
8548 	if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
8549 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8550 		cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
8551 		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
8552 		    sizeof (EXT_MENLO_GET_FW_VERSION));
8553 		cmd->ResponseLen = 0;
8554 		return;
8555 	}
8556 
8557 	/* Allocate packet. */
8558 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8559 	if (pkt == NULL) {
8560 		EL(ha, "failed, kmem_zalloc\n");
8561 		cmd->Status = EXT_STATUS_NO_MEMORY;
8562 		cmd->ResponseLen = 0;
8563 		return;
8564 	}
8565 
8566 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
8567 	pkt->mvfy.entry_count = 1;
8568 	pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);
8569 
8570 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8571 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
8572 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
8573 	ver.FwVersion = LE_32(pkt->mvfy.fw_version);
8574 
8575 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
8576 	    pkt->mvfy.options_status != CS_COMPLETE) {
8577 		/* Command error */
8578 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8579 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
8580 		    pkt->mvfy.failure_code);
8581 		cmd->Status = EXT_STATUS_ERR;
8582 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8583 		    QL_FUNCTION_FAILED;
8584 		cmd->ResponseLen = 0;
8585 	} else if (ddi_copyout((void *)&ver,
8586 	    (void *)(uintptr_t)cmd->ResponseAdr,
8587 	    sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
8588 		EL(ha, "failed, ddi_copyout\n");
8589 		cmd->Status = EXT_STATUS_COPY_ERR;
8590 		cmd->ResponseLen = 0;
8591 	} else {
8592 		cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
8593 	}
8594 
8595 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8596 
8597 	QL_PRINT_9(ha, "done\n");
8598 }
8599 
8600 /*
8601  * ql_menlo_update_fw
8602  *	Updates Menlo firmware.
8603  *
8604  * Input:
8605  *	ha:	adapter state pointer.
8606  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8607  *	mode:	flags.
8608  *
8609  * Returns:
8610  *	None, request status indicated in cmd->Status.
8611  * Context:
8612  *	Kernel context.
8613  */
8614 static void
8615 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8616 {
8617 	ql_mbx_iocb_t		*pkt;
8618 	dma_mem_t		*dma_mem;
8619 	EXT_MENLO_UPDATE_FW	fw;
8620 	uint32_t		*ptr32;
8621 	int			rval;
8622 
8623 	QL_PRINT_9(ha, "started\n");
8624 
8625 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
8626 		EL(ha, "failed, invalid request for HBA\n");
8627 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8628 		cmd->ResponseLen = 0;
8629 		return;
8630 	}
8631 
8632 	/*
8633 	 * TODO: only vp_index 0 can do this (?)
8634 	 */
8635 
8636 	/*  Verify the size of request structure. */
8637 	if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
8638 		/* Return error */
8639 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8640 		    sizeof (EXT_MENLO_UPDATE_FW));
8641 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8642 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8643 		cmd->ResponseLen = 0;
8644 		return;
8645 	}
8646 
8647 	/* Get update fw request. */
8648 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
8649 	    sizeof (EXT_MENLO_UPDATE_FW), mode) != 0) {
8650 		EL(ha, "failed, ddi_copyin\n");
8651 		cmd->Status = EXT_STATUS_COPY_ERR;
8652 		cmd->ResponseLen = 0;
8653 		return;
8654 	}
8655 
8656 	/* Wait for I/O to stop and daemon to stall. */
8657 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8658 		EL(ha, "ql_stall_driver failed\n");
8659 		ql_restart_hba(ha);
8660 		cmd->Status = EXT_STATUS_BUSY;
8661 		cmd->ResponseLen = 0;
8662 		return;
8663 	}
8664 
8665 	/* Allocate packet. */
8666 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
8667 	if (dma_mem == NULL) {
8668 		EL(ha, "failed, kmem_zalloc\n");
8669 		cmd->Status = EXT_STATUS_NO_MEMORY;
8670 		cmd->ResponseLen = 0;
8671 		return;
8672 	}
8673 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8674 	if (pkt == NULL) {
8675 		EL(ha, "failed, kmem_zalloc\n");
8676 		kmem_free(dma_mem, sizeof (dma_mem_t));
8677 		ql_restart_hba(ha);
8678 		cmd->Status = EXT_STATUS_NO_MEMORY;
8679 		cmd->ResponseLen = 0;
8680 		return;
8681 	}
8682 
8683 	/* Get DMA memory for the IOCB */
8684 	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
8685 	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8686 		cmn_err(CE_WARN, "%srequest queue DMA memory "
8687 		    "alloc failed", QL_NAME);
8688 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8689 		kmem_free(dma_mem, sizeof (dma_mem_t));
8690 		ql_restart_hba(ha);
8691 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8692 		cmd->ResponseLen = 0;
8693 		return;
8694 	}
8695 
8696 	/* Get firmware data. */
8697 	if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
8698 	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
8699 		EL(ha, "failed, get_buffer_data\n");
8700 		ql_free_dma_resource(ha, dma_mem);
8701 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8702 		kmem_free(dma_mem, sizeof (dma_mem_t));
8703 		ql_restart_hba(ha);
8704 		cmd->Status = EXT_STATUS_COPY_ERR;
8705 		cmd->ResponseLen = 0;
8706 		return;
8707 	}
8708 
8709 	/* Sync DMA buffer. */
8710 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8711 	    DDI_DMA_SYNC_FORDEV);
8712 
8713 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
8714 	pkt->mvfy.entry_count = 1;
8715 	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
8716 	ptr32 = dma_mem->bp;
8717 	pkt->mvfy.fw_version = LE_32(ptr32[2]);
8718 	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
8719 	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
8720 	pkt->mvfy.dseg_count = LE_16(1);
8721 	pkt->mvfy.dseg.address[0] = (uint32_t)
8722 	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8723 	pkt->mvfy.dseg.address[1] = (uint32_t)
8724 	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8725 	pkt->mvfy.dseg.length = LE_32(fw.TotalByteCount);
8726 
8727 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8728 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
8729 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
8730 
8731 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
8732 	    pkt->mvfy.options_status != CS_COMPLETE) {
8733 		/* Command error */
8734 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8735 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
8736 		    pkt->mvfy.failure_code);
8737 		cmd->Status = EXT_STATUS_ERR;
8738 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8739 		    QL_FUNCTION_FAILED;
8740 		cmd->ResponseLen = 0;
8741 	}
8742 
8743 	ql_free_dma_resource(ha, dma_mem);
8744 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8745 	kmem_free(dma_mem, sizeof (dma_mem_t));
8746 	ql_restart_hba(ha);
8747 
8748 	QL_PRINT_9(ha, "done\n");
8749 }
8750 
8751 /*
8752  * ql_menlo_manage_info
8753  *	Get Menlo manage info.
8754  *
8755  * Input:
8756  *	ha:	adapter state pointer.
8757  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8758  *	mode:	flags.
8759  *
8760  * Returns:
8761  *	None, request status indicated in cmd->Status.
8762  * Context:
8763  *	Kernel context.
8764  */
8765 static void
8766 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8767 {
8768 	ql_mbx_iocb_t		*pkt;
8769 	dma_mem_t		*dma_mem = NULL;
8770 	EXT_MENLO_MANAGE_INFO	info;
8771 	int			rval;
8772 
8773 	QL_PRINT_9(ha, "started\n");
8774 
8775 
8776 	/* The call is only supported for Schultz right now */
8777 	if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
8778 		ql_get_xgmac_statistics(ha, cmd, mode);
8779 		QL_PRINT_9(ha, "CFG_FCOE_SUPPORT done\n");
8780 		return;
8781 	}
8782 
8783 	if (!CFG_IST(ha, CFG_CTRL_MENLO)) {
8784 		EL(ha, "failed, invalid request for HBA\n");
8785 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8786 		cmd->ResponseLen = 0;
8787 		return;
8788 	}
8789 
8790 	/*  Verify the size of request structure. */
8791 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
8792 		/* Return error */
8793 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8794 		    sizeof (EXT_MENLO_MANAGE_INFO));
8795 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8796 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8797 		cmd->ResponseLen = 0;
8798 		return;
8799 	}
8800 
8801 	/* Get manage info request. */
8802 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
8803 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
8804 		EL(ha, "failed, ddi_copyin\n");
8805 		cmd->Status = EXT_STATUS_COPY_ERR;
8806 		cmd->ResponseLen = 0;
8807 		return;
8808 	}
8809 
8810 	/* Allocate packet. */
8811 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8812 	if (pkt == NULL) {
8813 		EL(ha, "failed, kmem_zalloc\n");
8814 		ql_restart_driver(ha);
8815 		cmd->Status = EXT_STATUS_NO_MEMORY;
8816 		cmd->ResponseLen = 0;
8817 		return;
8818 	}
8819 
8820 	pkt->mdata.entry_type = MENLO_DATA_TYPE;
8821 	pkt->mdata.entry_count = 1;
8822 	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);
8823 
8824 	/* Get DMA memory for the IOCB */
8825 	if (info.Operation == MENLO_OP_READ_MEM ||
8826 	    info.Operation == MENLO_OP_WRITE_MEM) {
8827 		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
8828 		pkt->mdata.parameter_1 =
8829 		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
8830 		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
8831 		    KM_SLEEP);
8832 		if (dma_mem == NULL) {
8833 			EL(ha, "failed, kmem_zalloc\n");
8834 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8835 			cmd->Status = EXT_STATUS_NO_MEMORY;
8836 			cmd->ResponseLen = 0;
8837 			return;
8838 		}
8839 		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
8840 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8841 			cmn_err(CE_WARN, "%srequest queue DMA memory "
8842 			    "alloc failed", QL_NAME);
8843 			kmem_free(dma_mem, sizeof (dma_mem_t));
8844 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8845 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8846 			cmd->ResponseLen = 0;
8847 			return;
8848 		}
8849 		if (info.Operation == MENLO_OP_WRITE_MEM) {
8850 			/* Get data. */
8851 			if (ql_get_buffer_data(
8852 			    (caddr_t)(uintptr_t)info.pDataBytes,
8853 			    dma_mem->bp, info.TotalByteCount, mode) !=
8854 			    info.TotalByteCount) {
8855 				EL(ha, "failed, get_buffer_data\n");
8856 				ql_free_dma_resource(ha, dma_mem);
8857 				kmem_free(dma_mem, sizeof (dma_mem_t));
8858 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8859 				cmd->Status = EXT_STATUS_COPY_ERR;
8860 				cmd->ResponseLen = 0;
8861 				return;
8862 			}
8863 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
8864 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
8865 		}
8866 		pkt->mdata.dseg_count = LE_16(1);
8867 		pkt->mdata.dseg.address[0] = (uint32_t)
8868 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8869 		pkt->mdata.dseg.address[1] = (uint32_t)
8870 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8871 		pkt->mdata.dseg.length = LE_32(info.TotalByteCount);
8872 	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
8873 		pkt->mdata.parameter_1 =
8874 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
8875 		pkt->mdata.parameter_2 =
8876 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
8877 		pkt->mdata.parameter_3 =
8878 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
8879 	} else if (info.Operation & MENLO_OP_GET_INFO) {
8880 		pkt->mdata.parameter_1 =
8881 		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
8882 		pkt->mdata.parameter_2 =
8883 		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
8884 	}
8885 
8886 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8887 	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
8888 	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);
8889 
8890 	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
8891 	    pkt->mdata.options_status != CS_COMPLETE) {
8892 		/* Command error */
8893 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8894 		    pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status,
8895 		    pkt->mdata.failure_code);
8896 		cmd->Status = EXT_STATUS_ERR;
8897 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8898 		    QL_FUNCTION_FAILED;
8899 		cmd->ResponseLen = 0;
8900 	} else if (info.Operation == MENLO_OP_READ_MEM) {
8901 		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8902 		    DDI_DMA_SYNC_FORKERNEL);
8903 		if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes,
8904 		    dma_mem->bp, info.TotalByteCount, mode) !=
8905 		    info.TotalByteCount) {
8906 			cmd->Status = EXT_STATUS_COPY_ERR;
8907 			cmd->ResponseLen = 0;
8908 		}
8909 	}
8910 
8911 	ql_free_dma_resource(ha, dma_mem);
8912 	kmem_free(dma_mem, sizeof (dma_mem_t));
8913 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8914 
8915 	QL_PRINT_9(ha, "done\n");
8916 }
8917 
8918 /*
8919  * ql_suspend_hba
8920  *	Suspends all adapter ports.
8921  *
8922  * Input:
8923  *	ha:		adapter state pointer.
8924  *	opt:	BIT_0 --> leave driver stalled on exit if
8925  *		failed.
8926  *
8927  * Returns:
8928  *	ql local function return status code.
8929  *
8930  * Context:
8931  *	Kernel context.
8932  */
8933 static int
8934 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
8935 {
8936 	ql_adapter_state_t	*ha2;
8937 	ql_link_t		*link;
8938 	int			rval = QL_SUCCESS;
8939 
8940 	/* Quiesce I/O on all adapter ports */
8941 	for (link = ql_hba.first; link != NULL; link = link->next) {
8942 		ha2 = link->base_address;
8943 
8944 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8945 			continue;
8946 		}
8947 
8948 		if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) {
8949 			EL(ha, "ql_stall_driver status=%xh\n", rval);
8950 			break;
8951 		}
8952 	}
8953 
8954 	return (rval);
8955 }
8956 
8957 /*
8958  * ql_restart_hba
8959  *	Restarts adapter.
8960  *
8961  * Input:
8962  *	ha:	adapter state pointer.
8963  *
8964  * Context:
8965  *	Kernel context.
8966  */
8967 static void
8968 ql_restart_hba(ql_adapter_state_t *ha)
8969 {
8970 	ql_adapter_state_t	*ha2;
8971 	ql_link_t		*link;
8972 
8973 	/* Resume I/O on all adapter ports */
8974 	for (link = ql_hba.first; link != NULL; link = link->next) {
8975 		ha2 = link->base_address;
8976 
8977 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8978 			continue;
8979 		}
8980 
8981 		ql_restart_driver(ha2);
8982 	}
8983 }
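
/*
 * Illustrative usage sketch: callers pair these two helpers around
 * any operation that must quiesce every port sharing the same
 * fru_hba_index, as the Menlo reset and firmware update paths above
 * do:
 *
 *	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
 *		ql_restart_hba(ha);
 *		cmd->Status = EXT_STATUS_BUSY;
 *		return;
 *	}
 *	(perform the disruptive operation)
 *	ql_restart_hba(ha);
 */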
8984 
8985 /*
8986  * ql_get_vp_cnt_id
8987  *	Retrieves virtual port count and ID information.
8988  *
8989  * Input:
8990  *	ha:	adapter state pointer.
8991  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8992  *	mode:	flags.
8993  *
8994  * Returns:
8995  *	None, request status indicated in cmd->Status.
8996  *
8997  * Context:
8998  *	Kernel context.
8999  *
9000  */
9001 static void
9002 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9003 {
9004 	ql_adapter_state_t	*vha;
9005 	PEXT_VPORT_ID_CNT	ptmp_vp;
9006 	int			id = 0;
9007 	int			rval;
9008 	char			name[MAXPATHLEN];
9009 
9010 	QL_PRINT_9(ha, "started\n");
9011 
9012 	/*
9013 	 * To be backward compatible with older API
9014 	 * check for the size of old EXT_VPORT_ID_CNT
9015 	 */
9016 	if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) &&
9017 	    (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) {
9018 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9019 		cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
9020 		EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
9021 		    cmd->ResponseLen);
9022 		cmd->ResponseLen = 0;
9023 		return;
9024 	}
9025 
9026 	ptmp_vp = (EXT_VPORT_ID_CNT *)
9027 	    kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP);
9028 	if (ptmp_vp == NULL) {
9029 		EL(ha, "failed, kmem_zalloc\n");
9030 		cmd->ResponseLen = 0;
9031 		return;
9032 	}
9033 	vha = ha->vp_next;
9034 	while (vha != NULL) {
9035 		ptmp_vp->VpCnt++;
9036 		ptmp_vp->VpId[id] = vha->vp_index;
9037 		(void) ddi_pathname(vha->dip, name);
9038 		(void) strncpy((char *)ptmp_vp->vp_path[id], name,
9039 		    (sizeof (ptmp_vp->vp_path[id]) -1));
9040 		ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance;
9041 		id++;
9042 		vha = vha->vp_next;
9043 	}
9044 	rval = ddi_copyout((void *)ptmp_vp,
9045 	    (void *)(uintptr_t)(cmd->ResponseAdr),
9046 	    cmd->ResponseLen, mode);
9047 	if (rval != 0) {
9048 		cmd->Status = EXT_STATUS_COPY_ERR;
9049 		cmd->ResponseLen = 0;
9050 		EL(ha, "failed, ddi_copyout\n");
9051 	} else {
9052 		cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
9053 		QL_PRINT_9(ha, "done, vport_cnt=%d\n",
9054 		    ptmp_vp->VpCnt);
9055 	}
9056 	kmem_free(ptmp_vp, sizeof (EXT_VPORT_ID_CNT));
9057 }
9058 
9059 /*
9060  * ql_vp_ioctl
9061  *	Performs all EXT_CC_VPORT_CMD functions.
9062  *
9063  * Input:
9064  *	ha:	adapter state pointer.
9065  *	cmd:	Local EXT_IOCTL cmd struct pointer.
9066  *	mode:	flags.
9067  *
9068  * Returns:
9069  *	None, request status indicated in cmd->Status.
9070  *
9071  * Context:
9072  *	Kernel context.
9073  */
9074 static void
9075 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9076 {
9077 	QL_PRINT_9(ha, "started, cmd=%d\n",
9078 	    cmd->SubCode);
9079 
9080 	/* case off on command subcode */
9081 	switch (cmd->SubCode) {
9082 	case EXT_VF_SC_VPORT_GETINFO:
9083 		ql_qry_vport(ha, cmd, mode);
9084 		break;
9085 	default:
9086 		/* function not supported. */
9087 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
9088 		EL(ha, "failed, Unsupported Subcode=%xh\n",
9089 		    cmd->SubCode);
9090 		break;
9091 	}
9092 
9093 	QL_PRINT_9(ha, "done\n");
9094 }
9095 
9096 /*
9097  * ql_qry_vport
9098  *	Performs EXT_VF_SC_VPORT_GETINFO subfunction.
9099  *
9100  * Input:
9101  *	ha:	adapter state pointer.
9102  *	cmd:	EXT_IOCTL cmd struct pointer.
9103  *	mode:	flags.
9104  *
9105  * Returns:
9106  *	None, request status indicated in cmd->Status.
9107  *
9108  * Context:
9109  *	Kernel context.
9110  */
9111 static void
9112 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
9113 {
9114 	ql_adapter_state_t	*tmp_vha;
9115 	EXT_VPORT_INFO		tmp_vport = {0};
9116 
9117 	QL_PRINT_9(vha, "started\n");
9118 
9119 	if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
9120 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9121 		cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
9122 		EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
9123 		    cmd->ResponseLen);
9124 		cmd->ResponseLen = 0;
9125 		return;
9126 	}
9127 
9128 	/* Fill in the vport information. */
9129 	bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
9130 	    EXT_DEF_WWN_NAME_SIZE);
9131 	bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
9132 	    EXT_DEF_WWN_NAME_SIZE);
9133 	tmp_vport.state = vha->state;
9134 	tmp_vport.id = vha->vp_index;
9135 
9136 	tmp_vha = vha->pha->vp_next;
9137 	while (tmp_vha != NULL) {
9138 		tmp_vport.used++;
9139 		tmp_vha = tmp_vha->vp_next;
9140 	}
9141 
9142 	if (vha->max_vports > tmp_vport.used) {
9143 		tmp_vport.free = vha->max_vports - tmp_vport.used;
9144 	}
9145 
9146 	if (ddi_copyout((void *)&tmp_vport,
9147 	    (void *)(uintptr_t)(cmd->ResponseAdr),
9148 	    sizeof (EXT_VPORT_INFO), mode) != 0) {
9149 		cmd->Status = EXT_STATUS_COPY_ERR;
9150 		cmd->ResponseLen = 0;
9151 		EL(vha, "failed, ddi_copyout\n");
9152 	} else {
9153 		cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
9154 		QL_PRINT_9(vha, "done\n");
9155 	}
9156 }
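
/*
 * A minimal standalone sketch (kept under #if 0) of the used/free vport
 * accounting in ql_qry_vport() above: walk the vp list, count entries,
 * and only report free slots when max_vports exceeds the count.  The
 * list type and max_vports value are simplified stand-ins for the
 * driver's ql_adapter_state_t fields.
 */
#if 0
#include <stdio.h>

typedef struct vp_node {
	int		vp_index;
	struct vp_node	*vp_next;
} vp_node_t;

int
main(void)
{
	vp_node_t	vp2 = { 2, NULL };
	vp_node_t	vp1 = { 1, &vp2 };
	vp_node_t	*vp = &vp1;		/* head: pha->vp_next */
	unsigned int	used = 0, avail = 0;
	unsigned int	max_vports = 254;	/* assumed HBA limit */

	for (; vp != NULL; vp = vp->vp_next)
		used++;

	if (max_vports > used)
		avail = max_vports - used;	/* guarded, as above */

	printf("used=%u avail=%u\n", used, avail);
	return (0);
}
#endif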
9157 
9158 /*
9159  * ql_access_flash
9160  *	Performs all EXT_CC_ACCESS_FLASH_OS functions.
9161  *
9162  * Input:
9163  *	ha:	adapter state pointer.
9164  *	cmd:	Local EXT_IOCTL cmd struct pointer.
9165  *	mode:	flags.
9166  *
9167  * Returns:
9168  *	None, request status indicated in cmd->Status.
9169  *
9170  * Context:
9171  *	Kernel context.
9172  */
9173 static void
9174 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9175 {
9176 	int	rval;
9177 
9178 	QL_PRINT_9(ha, "started\n");
9179 
9180 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1) &&
9181 	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
9182 		EL(ha, "ql_stall_driver failed\n");
9183 		ql_restart_driver(ha);
9184 		cmd->Status = EXT_STATUS_BUSY;
9185 		cmd->ResponseLen = 0;
9186 		return;
9187 	}
9188 
9189 	switch (cmd->SubCode) {
9190 	case EXT_SC_FLASH_READ:
9191 		if ((rval = ql_flash_fcode_dump(ha,
9192 		    (void *)(uintptr_t)(cmd->ResponseAdr),
9193 		    (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) {
9194 			cmd->Status = EXT_STATUS_COPY_ERR;
9195 			cmd->ResponseLen = 0;
9196 			EL(ha, "flash_fcode_dump status=%xh\n", rval);
9197 		}
9198 		break;
9199 	case EXT_SC_FLASH_WRITE:
9200 		if ((rval = ql_r_m_w_flash(ha,
9201 		    (void *)(uintptr_t)(cmd->RequestAdr),
9202 		    (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) !=
9203 		    QL_SUCCESS) {
9204 			cmd->Status = EXT_STATUS_COPY_ERR;
9205 			cmd->ResponseLen = 0;
9206 			EL(ha, "r_m_w_flash status=%xh\n", rval);
9207 		} else {
9208 			/* Reset caches on all adapter instances. */
9209 			ql_update_flash_caches(ha);
9210 		}
9211 		break;
9212 	default:
9213 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
9214 		cmd->Status = EXT_STATUS_ERR;
9215 		cmd->ResponseLen = 0;
9216 		break;
9217 	}
9218 
9219 	/* Resume I/O */
9220 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
9221 		EL(ha, "isp_abort_needed for restart\n");
9222 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
9223 		    DRIVER_STALL);
9224 	}
9225 
9226 	QL_PRINT_9(ha, "done\n");
9227 }
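
/*
 * A sketch (kept under #if 0) of the control flow ql_access_flash()
 * follows: quiesce I/O before touching flash and always resume it
 * afterwards, even when the stall or the flash operation fails.  The
 * helper names below are placeholders, not driver interfaces.
 */
#if 0
#include <stdio.h>

static int
stall_io(void)
{
	printf("stalling I/O\n");
	return (0);			/* 0 == success */
}

static void
resume_io(void)
{
	printf("resuming I/O\n");
}

static int
flash_op(void)
{
	printf("flash read/write\n");
	return (0);
}

int
main(void)
{
	int	rc;

	if (stall_io() != 0) {
		resume_io();		/* restart whatever was stalled */
		return (1);		/* report busy to the caller */
	}

	rc = flash_op();		/* read or read-modify-write flash */

	resume_io();			/* always resume, even on failure */
	return (rc);
}
#endif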
9228 
9229 /*
9230  * ql_reset_cmd
9231  *	Performs all EXT_CC_RESET_FW_OS functions.
9232  *
9233  * Input:
9234  *	ha:	adapter state pointer.
9235  *	cmd:	Local EXT_IOCTL cmd struct pointer.
9236  *
9237  * Returns:
9238  *	None, request status indicated in cmd->Status.
9239  *
9240  * Context:
9241  *	Kernel context.
9242  */
9243 static void
9244 ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
9245 {
9246 	uint8_t	timer;
9247 
9248 	QL_PRINT_9(ha, "started\n");
9249 
9250 	switch (cmd->SubCode) {
9251 	case EXT_SC_RESET_FC_FW:
9252 		if (CFG_IST(ha, CFG_CTRL_82XX)) {
9253 			(void) ql_8021_reset_fw(ha);
9254 		} else {
9255 			EL(ha, "isp_abort_needed\n");
9256 			ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
9257 		}
9258 		for (timer = 180; timer; timer--) {
9259 			ql_awaken_task_daemon(ha, NULL, 0, 0);
9260 			/* Delay for 1 second. */
9261 			delay(100);
9262 			if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
9263 			    ABORT_ISP_ACTIVE | LOOP_RESYNC_NEEDED |
9264 			    LOOP_RESYNC_ACTIVE))) {
9265 				break;
9266 			}
9267 		}
9268 		break;
9269 	case EXT_SC_RESET_MPI_FW:
9270 		if (!(CFG_IST(ha, CFG_CTRL_8081))) {
9271 			EL(ha, "invalid request for HBA\n");
9272 			cmd->Status = EXT_STATUS_INVALID_REQUEST;
9273 			cmd->ResponseLen = 0;
9274 		} else {
9275 			ADAPTER_STATE_LOCK(ha);
9276 			ha->flags |= DISABLE_NIC_FW_DMP;
9277 			ADAPTER_STATE_UNLOCK(ha);
9278 
9279 			/* Wait for I/O to stop and daemon to stall. */
9280 			if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
9281 				EL(ha, "ql_suspend_hba failed\n");
9282 				cmd->Status = EXT_STATUS_BUSY;
9283 				cmd->ResponseLen = 0;
9284 			} else if (ql_restart_mpi(ha) != QL_SUCCESS) {
9285 				cmd->Status = EXT_STATUS_ERR;
9286 				cmd->ResponseLen = 0;
9287 			} else {
9288 				/*
9289 				 * While the restart_mpi mailbox cmd may be
9290 				 * done the MPI is not. Wait at least 6 sec. or
9291 				 * exit if the loop comes up.
9292 				 */
9293 				for (timer = 6; timer; timer--) {
9294 					if (!(ha->task_daemon_flags &
9295 					    LOOP_DOWN)) {
9296 						break;
9297 					}
9298 					/* Delay for 1 second. */
9299 					ql_delay(ha, 1000000);
9300 				}
9301 			}
9302 			ql_restart_hba(ha);
9303 
9304 			ADAPTER_STATE_LOCK(ha);
9305 			ha->flags &= ~DISABLE_NIC_FW_DMP;
9306 			ADAPTER_STATE_UNLOCK(ha);
9307 		}
9308 		break;
9309 	default:
9310 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
9311 		cmd->Status = EXT_STATUS_ERR;
9312 		cmd->ResponseLen = 0;
9313 		break;
9314 	}
9315 
9316 	QL_PRINT_9(ha, "done\n");
9317 }
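
/*
 * A sketch (kept under #if 0) of the bounded polling loop used for
 * EXT_SC_RESET_FC_FW above: poll once a second, for at most 180 seconds,
 * until the abort/resync flags clear.  The flag mask and the simulated
 * flag word are stand-ins for ha->task_daemon_flags.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

#define	BUSY_FLAGS	0x0fU		/* assumed abort/resync flag mask */

static volatile unsigned int task_flags = BUSY_FLAGS;

int
main(void)
{
	int	timer;

	for (timer = 180; timer; timer--) {
		if (timer == 177)	/* pretend the firmware came back */
			task_flags = 0;
		if (!(task_flags & BUSY_FLAGS))
			break;		/* reset finished */
		sleep(1);		/* 1-second poll interval */
	}

	printf("%s after %d seconds\n",
	    timer ? "reset complete" : "timed out", 180 - timer);
	return (timer ? 0 : 1);
}
#endif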
9318 
9319 /*
9320  * ql_get_dcbx_parameters
9321  *	Get DCBX parameters.
9322  *
9323  * Input:
9324  *	ha:	adapter state pointer.
9325  *	cmd:	User space CT arguments pointer.
9326  *	mode:	flags.
9327  */
9328 static void
9329 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9330 {
9331 	uint8_t		*tmp_buf;
9332 	int		rval;
9333 
9334 	QL_PRINT_9(ha, "started\n");
9335 
9336 	if (!(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
9337 		EL(ha, "invalid request for HBA\n");
9338 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
9339 		cmd->ResponseLen = 0;
9340 		return;
9341 	}
9342 
9343 	/* Allocate memory for command. */
9344 	tmp_buf = kmem_zalloc(EXT_DEF_DCBX_PARAM_BUF_SIZE, KM_SLEEP);
9345 	if (tmp_buf == NULL) {
9346 		EL(ha, "failed, kmem_zalloc\n");
9347 		cmd->Status = EXT_STATUS_NO_MEMORY;
9348 		cmd->ResponseLen = 0;
9349 		return;
9350 	}
9351 	/* Send command */
9352 	rval = ql_get_dcbx_params(ha, EXT_DEF_DCBX_PARAM_BUF_SIZE,
9353 	    (caddr_t)tmp_buf);
9354 	if (rval != QL_SUCCESS) {
9355 		/* error */
9356 		EL(ha, "failed, get_dcbx_params_mbx=%xh\n", rval);
9357 		kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
9358 		cmd->Status = EXT_STATUS_ERR;
9359 		cmd->ResponseLen = 0;
9360 		return;
9361 	}
9362 
9363 	/* Copy the response */
9364 	if (ql_send_buffer_data((caddr_t)tmp_buf,
9365 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
9366 	    EXT_DEF_DCBX_PARAM_BUF_SIZE, mode) != EXT_DEF_DCBX_PARAM_BUF_SIZE) {
9367 		EL(ha, "failed, ddi_copyout\n");
9368 		cmd->Status = EXT_STATUS_COPY_ERR;
9369 		cmd->ResponseLen = 0;
9370 	} else {
9371 		cmd->ResponseLen = EXT_DEF_DCBX_PARAM_BUF_SIZE;
9372 		QL_PRINT_9(ha, "done\n");
9373 	}
9374 	kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
9375 
9376 }
9377 
9378 /*
9379  * ql_qry_cna_port
9380  *	Performs EXT_SC_QUERY_CNA_PORT subfunction.
9381  *
9382  * Input:
9383  *	ha:	adapter state pointer.
9384  *	cmd:	EXT_IOCTL cmd struct pointer.
9385  *	mode:	flags.
9386  *
9387  * Returns:
9388  *	None, request status indicated in cmd->Status.
9389  *
9390  * Context:
9391  *	Kernel context.
9392  */
9393 static void
9394 ql_qry_cna_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9395 {
9396 	EXT_CNA_PORT	cna_port = {0};
9397 
9398 	QL_PRINT_9(ha, "started\n");
9399 
9400 	if (!(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
9401 		EL(ha, "invalid request for HBA\n");
9402 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
9403 		cmd->ResponseLen = 0;
9404 		return;
9405 	}
9406 
9407 	if (cmd->ResponseLen < sizeof (EXT_CNA_PORT)) {
9408 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9409 		cmd->DetailStatus = sizeof (EXT_CNA_PORT);
9410 		EL(ha, "failed, ResponseLen < EXT_CNA_PORT, Len=%xh\n",
9411 		    cmd->ResponseLen);
9412 		cmd->ResponseLen = 0;
9413 		return;
9414 	}
9415 
9416 	cna_port.VLanId = ha->fcoe_vlan_id;
9417 	cna_port.FabricParam = ha->fabric_params;
9418 	bcopy(ha->fcoe_vnport_mac, cna_port.VNPortMACAddress,
9419 	    EXT_DEF_MAC_ADDRESS_SIZE);
9420 
9421 	if (ddi_copyout((void *)&cna_port,
9422 	    (void *)(uintptr_t)(cmd->ResponseAdr),
9423 	    sizeof (EXT_CNA_PORT), mode) != 0) {
9424 		cmd->Status = EXT_STATUS_COPY_ERR;
9425 		cmd->ResponseLen = 0;
9426 		EL(ha, "failed, ddi_copyout\n");
9427 	} else {
9428 		cmd->ResponseLen = sizeof (EXT_CNA_PORT);
9429 		QL_PRINT_9(ha, "done\n");
9430 	}
9431 }
9432 
9433 /*
9434  * ql_qry_adapter_versions
9435  *	Performs EXT_SC_QUERY_ADAPTER_VERSIONS subfunction.
9436  *
9437  * Input:
9438  *	ha:	adapter state pointer.
9439  *	cmd:	EXT_IOCTL cmd struct pointer.
9440  *	mode:	flags.
9441  *
9442  * Returns:
9443  *	None, request status indicated in cmd->Status.
9444  *
9445  * Context:
9446  *	Kernel context.
9447  */
9448 static void
9449 ql_qry_adapter_versions(ql_adapter_state_t *ha, EXT_IOCTL *cmd,
9450     int mode)
9451 {
9452 	uint8_t				is_8142, mpi_cap;
9453 	uint32_t			ver_len, transfer_size;
9454 	PEXT_ADAPTERREGIONVERSION	padapter_ver = NULL;
9455 
9456 	QL_PRINT_9(ha, "started\n");
9457 
9458 	/* 8142s do not have EDC PHY firmware. */
9459 	mpi_cap = (uint8_t)(ha->mpi_capability_list >> 8);
9460 
9461 	is_8142 = 0;
9462 	/* Sizeof (Length + Reserved) = 8 Bytes */
9463 	if (mpi_cap == 0x02 || mpi_cap == 0x04) {
9464 		ver_len = (sizeof (EXT_REGIONVERSION) * (NO_OF_VERSIONS - 1))
9465 		    + 8;
9466 		is_8142 = 1;
9467 	} else {
9468 		ver_len = (sizeof (EXT_REGIONVERSION) * NO_OF_VERSIONS) + 8;
9469 	}
9470 
9471 	/* Allocate local memory for EXT_ADAPTERREGIONVERSION */
9472 	padapter_ver = (EXT_ADAPTERREGIONVERSION *)kmem_zalloc(ver_len,
9473 	    KM_SLEEP);
9474 
9475 	if (padapter_ver == NULL) {
9476 		EL(ha, "failed, kmem_zalloc\n");
9477 		cmd->Status = EXT_STATUS_NO_MEMORY;
9478 		cmd->ResponseLen = 0;
9479 		return;
9480 	}
9481 
9482 	padapter_ver->Length = 1;
9483 	/* Copy MPI version */
9484 	padapter_ver->RegionVersion[0].Region =
9485 	    EXT_OPT_ROM_REGION_MPI_RISC_FW;
9486 	padapter_ver->RegionVersion[0].Version[0] =
9487 	    ha->mpi_fw_major_version;
9488 	padapter_ver->RegionVersion[0].Version[1] =
9489 	    ha->mpi_fw_minor_version;
9490 	padapter_ver->RegionVersion[0].Version[2] =
9491 	    ha->mpi_fw_subminor_version;
9492 	padapter_ver->RegionVersion[0].VersionLength = 3;
9493 	padapter_ver->RegionVersion[0].Location = RUNNING_VERSION;
9494 
9495 	if (!is_8142) {
9496 		padapter_ver->RegionVersion[1].Region =
9497 		    EXT_OPT_ROM_REGION_EDC_PHY_FW;
9498 		padapter_ver->RegionVersion[1].Version[0] =
9499 		    ha->phy_fw_major_version;
9500 		padapter_ver->RegionVersion[1].Version[1] =
9501 		    ha->phy_fw_minor_version;
9502 		padapter_ver->RegionVersion[1].Version[2] =
9503 		    ha->phy_fw_subminor_version;
9504 		padapter_ver->RegionVersion[1].VersionLength = 3;
9505 		padapter_ver->RegionVersion[1].Location = RUNNING_VERSION;
9506 		padapter_ver->Length = NO_OF_VERSIONS;
9507 	}
9508 
9509 	if (cmd->ResponseLen < ver_len) {
9510 		EL(ha, "failed, ResponseLen < ver_len, "
9511 		    "RespLen=%xh ver_len=%xh\n", cmd->ResponseLen, ver_len);
9512 		/* Calculate the No. of valid versions being returned. */
9513 		padapter_ver->Length = (uint32_t)
9514 		    ((cmd->ResponseLen - 8) / sizeof (EXT_REGIONVERSION));
9515 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9516 		cmd->DetailStatus = ver_len;
9517 		transfer_size = cmd->ResponseLen;
9518 	} else {
9519 		transfer_size = ver_len;
9520 	}
9521 
9522 	if (ddi_copyout((void *)padapter_ver,
9523 	    (void *)(uintptr_t)(cmd->ResponseAdr),
9524 	    transfer_size, mode) != 0) {
9525 		cmd->Status = EXT_STATUS_COPY_ERR;
9526 		cmd->ResponseLen = 0;
9527 		EL(ha, "failed, ddi_copyout\n");
9528 	} else {
9529 		cmd->ResponseLen = ver_len;
9530 		QL_PRINT_9(ha, "done\n");
9531 	}
9532 
9533 	kmem_free(padapter_ver, ver_len);
9534 }
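
/*
 * A worked example (kept under #if 0) of the length arithmetic in
 * ql_qry_adapter_versions(): the response is an 8-byte header followed
 * by region-version records, and a short caller buffer truncates the
 * record count.  The record size and counts below are assumed values,
 * not the EXT_REGIONVERSION/NO_OF_VERSIONS definitions.
 */
#if 0
#include <stdio.h>

#define	REGION_VERSION_SIZE	8	/* assumed record size */
#define	NO_OF_VERSIONS		2
#define	HEADER_SIZE		8	/* Length + Reserved */

int
main(void)
{
	unsigned int	ver_len, response_len, records;

	ver_len = REGION_VERSION_SIZE * NO_OF_VERSIONS + HEADER_SIZE;
	response_len = 16;		/* caller's (short) buffer */

	if (response_len < ver_len)	/* truncate the record count */
		records = (response_len - HEADER_SIZE) / REGION_VERSION_SIZE;
	else
		records = NO_OF_VERSIONS;

	printf("ver_len=%u, caller gets %u record(s)\n", ver_len, records);
	return (0);
}
#endif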
9535 
9536 /*
9537  * ql_get_xgmac_statistics
9538  *	Get XgMac information
9539  *
9540  * Input:
9541  *	ha:	adapter state pointer.
9542  *	cmd:	EXT_IOCTL cmd struct pointer.
9543  *	mode:	flags.
9544  *
9545  * Returns:
9546  *	None, request status indicated in cmd->Status.
9547  *
9548  * Context:
9549  *	Kernel context.
9550  */
9551 static void
9552 ql_get_xgmac_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9553 {
9554 	int			rval;
9555 	uint32_t		size;
9556 	int8_t			*tmp_buf;
9557 	EXT_MENLO_MANAGE_INFO	info;
9558 
9559 	QL_PRINT_9(ha, "started\n");
9560 
9561 	/*  Verify the size of request structure. */
9562 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
9563 		/* Return error */
9564 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
9565 		    (int)sizeof (EXT_MENLO_MANAGE_INFO));
9566 		cmd->Status = EXT_STATUS_INVALID_PARAM;
9567 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
9568 		cmd->ResponseLen = 0;
9569 		return;
9570 	}
9571 
9572 	/* Get manage info request. */
9573 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
9574 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
9575 		EL(ha, "failed, ddi_copyin\n");
9576 		cmd->Status = EXT_STATUS_COPY_ERR;
9577 		cmd->ResponseLen = 0;
9578 		return;
9579 	}
9580 
9581 	size = info.TotalByteCount;
9582 	if (!size) {
9583 		/* parameter error */
9584 		cmd->Status = EXT_STATUS_INVALID_PARAM;
9585 		cmd->DetailStatus = 0;
9586 		EL(ha, "failed, size=%xh\n", size);
9587 		cmd->ResponseLen = 0;
9588 		return;
9589 	}
9590 
9591 	/* Allocate memory for command. */
9592 	tmp_buf = kmem_zalloc(size, KM_SLEEP);
9593 	if (tmp_buf == NULL) {
9594 		EL(ha, "failed, kmem_zalloc\n");
9595 		cmd->Status = EXT_STATUS_NO_MEMORY;
9596 		cmd->ResponseLen = 0;
9597 		return;
9598 	}
9599 
9600 	if (!(info.Operation & MENLO_OP_GET_INFO)) {
9601 		EL(ha, "Invalid request for 81XX\n");
9602 		kmem_free(tmp_buf, size);
9603 		cmd->Status = EXT_STATUS_ERR;
9604 		cmd->ResponseLen = 0;
9605 		return;
9606 	}
9607 
9608 	rval = ql_get_xgmac_stats(ha, size, (caddr_t)tmp_buf);
9609 
9610 	if (rval != QL_SUCCESS) {
9611 		/* error */
9612 		EL(ha, "failed, get_xgmac_stats =%xh\n", rval);
9613 		kmem_free(tmp_buf, size);
9614 		cmd->Status = EXT_STATUS_ERR;
9615 		cmd->ResponseLen = 0;
9616 		return;
9617 	}
9618 
9619 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)info.pDataBytes,
9620 	    size, mode) != size) {
9621 		EL(ha, "failed, ddi_copyout\n");
9622 		cmd->Status = EXT_STATUS_COPY_ERR;
9623 		cmd->ResponseLen = 0;
9624 	} else {
9625 		cmd->ResponseLen = info.TotalByteCount;
9626 		QL_PRINT_9(ha, "done\n");
9627 	}
9628 	kmem_free(tmp_buf, size);
9629 	QL_PRINT_9(ha, "done\n");
9630 }
9631 
9632 /*
9633  * ql_get_fcf_list
9634  *	Get FCF list.
9635  *
9636  * Input:
9637  *	ha:	adapter state pointer.
9638  *	cmd:	User space CT arguments pointer.
9639  *	mode:	flags.
9640  */
9641 static void
9642 ql_get_fcf_list(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9643 {
9644 	uint8_t			*tmp_buf;
9645 	int			rval;
9646 	EXT_FCF_LIST		fcf_list = {0};
9647 	ql_fcf_list_desc_t	mb_fcf_list = {0};
9648 
9649 	QL_PRINT_9(ha, "started\n");
9650 
9651 	if (!(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
9652 		EL(ha, "invalid request for HBA\n");
9653 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
9654 		cmd->ResponseLen = 0;
9655 		return;
9656 	}
9657 	/* Get manage info request. */
9658 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
9659 	    (caddr_t)&fcf_list, sizeof (EXT_FCF_LIST), mode) != 0) {
9660 		EL(ha, "failed, ddi_copyin\n");
9661 		cmd->Status = EXT_STATUS_COPY_ERR;
9662 		cmd->ResponseLen = 0;
9663 		return;
9664 	}
9665 
9666 	if (!(fcf_list.BufSize)) {
9667 		/* Return error */
9668 		EL(ha, "failed, fcf_list BufSize is=%xh\n",
9669 		    fcf_list.BufSize);
9670 		cmd->Status = EXT_STATUS_INVALID_PARAM;
9671 		cmd->ResponseLen = 0;
9672 		return;
9673 	}
9674 	/* Allocate memory for command. */
9675 	tmp_buf = kmem_zalloc(fcf_list.BufSize, KM_SLEEP);
9676 	if (tmp_buf == NULL) {
9677 		EL(ha, "failed, kmem_zalloc\n");
9678 		cmd->Status = EXT_STATUS_NO_MEMORY;
9679 		cmd->ResponseLen = 0;
9680 		return;
9681 	}
9682 	/* build the descriptor */
9683 	if (fcf_list.Options) {
9684 		mb_fcf_list.options = FCF_LIST_RETURN_ONE;
9685 	} else {
9686 		mb_fcf_list.options = FCF_LIST_RETURN_ALL;
9687 	}
9688 	mb_fcf_list.fcf_index = (uint16_t)fcf_list.FcfIndex;
9689 	mb_fcf_list.buffer_size = fcf_list.BufSize;
9690 
9691 	/* Send command */
9692 	rval = ql_get_fcf_list_mbx(ha, &mb_fcf_list, (caddr_t)tmp_buf);
9693 	if (rval != QL_SUCCESS) {
9694 		/* error */
9695 		EL(ha, "failed, get_fcf_list_mbx=%xh\n", rval);
9696 		kmem_free(tmp_buf, fcf_list.BufSize);
9697 		cmd->Status = EXT_STATUS_ERR;
9698 		cmd->ResponseLen = 0;
9699 		return;
9700 	}
9701 
9702 	/* Copy the response */
9703 	if (ql_send_buffer_data((caddr_t)tmp_buf,
9704 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
9705 	    fcf_list.BufSize, mode) != fcf_list.BufSize) {
9706 		EL(ha, "failed, ddi_copyout\n");
9707 		cmd->Status = EXT_STATUS_COPY_ERR;
9708 		cmd->ResponseLen = 0;
9709 	} else {
9710 		cmd->ResponseLen = mb_fcf_list.buffer_size;
9711 		QL_PRINT_9(ha, "done\n");
9712 	}
9713 
9714 	kmem_free(tmp_buf, fcf_list.BufSize);
9715 }
9716 
9717 /*
9718  * ql_get_resource_counts
9719  *	Get Resource counts:
9720  *
9721  * Input:
9722  *	ha:	adapter state pointer.
9723  *	cmd:	User space CT arguments pointer.
9724  *	mode:	flags.
9725  */
9726 static void
9727 ql_get_resource_counts(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9728 {
9729 	int			rval;
9730 	ql_mbx_data_t		mr;
9731 	EXT_RESOURCE_CNTS	tmp_rc_cnt = {0};
9732 
9733 	QL_PRINT_9(ha, "started\n");
9734 
9735 	if (cmd->ResponseLen < sizeof (EXT_RESOURCE_CNTS)) {
9736 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9737 		cmd->DetailStatus = sizeof (EXT_RESOURCE_CNTS);
9738 		EL(ha, "failed, ResponseLen < EXT_RESOURCE_CNTS, "
9739 		    "Len=%xh\n", cmd->ResponseLen);
9740 		cmd->ResponseLen = 0;
9741 		return;
9742 	}
9743 
9744 	rval = ql_get_resource_cnts(ha, &mr);
9745 	if (rval != QL_SUCCESS) {
9746 		EL(ha, "resource cnt mbx failed\n");
9747 		cmd->Status = EXT_STATUS_ERR;
9748 		cmd->ResponseLen = 0;
9749 		return;
9750 	}
9751 
9752 	tmp_rc_cnt.OrgTgtXchgCtrlCnt = (uint32_t)mr.mb[1];
9753 	tmp_rc_cnt.CurTgtXchgCtrlCnt = (uint32_t)mr.mb[2];
9754 	tmp_rc_cnt.CurXchgCtrlCnt = (uint32_t)mr.mb[3];
9755 	tmp_rc_cnt.OrgXchgCtrlCnt = (uint32_t)mr.mb[6];
9756 	tmp_rc_cnt.CurIocbBufCnt = (uint32_t)mr.mb[7];
9757 	tmp_rc_cnt.OrgIocbBufCnt = (uint32_t)mr.mb[10];
9758 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
9759 		tmp_rc_cnt.NoOfSupVPs = (uint32_t)mr.mb[11];
9760 	}
9761 	if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
9762 		tmp_rc_cnt.NoOfSupFCFs = (uint32_t)mr.mb[12];
9763 	}
9764 
9765 	rval = ddi_copyout((void *)&tmp_rc_cnt,
9766 	    (void *)(uintptr_t)(cmd->ResponseAdr),
9767 	    sizeof (EXT_RESOURCE_CNTS), mode);
9768 	if (rval != 0) {
9769 		cmd->Status = EXT_STATUS_COPY_ERR;
9770 		cmd->ResponseLen = 0;
9771 		EL(ha, "failed, ddi_copyout\n");
9772 	} else {
9773 		cmd->ResponseLen = sizeof (EXT_RESOURCE_CNTS);
9774 		QL_PRINT_9(ha, "done\n");
9775 	}
9776 }
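
/*
 * A sketch (kept under #if 0) of the mailbox unpacking done in
 * ql_get_resource_counts(): the firmware returns the counts in mailbox
 * registers and the handler maps mb[1..10] onto named fields.  The
 * structure and sample register contents are invented for illustration.
 */
#if 0
#include <stdio.h>

typedef struct resource_cnts {
	unsigned int	org_tgt_xchg;	/* mb[1] */
	unsigned int	cur_tgt_xchg;	/* mb[2] */
	unsigned int	cur_xchg;	/* mb[3] */
	unsigned int	org_xchg;	/* mb[6] */
	unsigned int	cur_iocb;	/* mb[7] */
	unsigned int	org_iocb;	/* mb[10] */
} resource_cnts_t;

int
main(void)
{
	unsigned short	mb[16] = { 0, 2048, 1024, 900, 0, 0, 2048,
			    512, 0, 0, 1024 };
	resource_cnts_t	rc;

	rc.org_tgt_xchg = mb[1];
	rc.cur_tgt_xchg = mb[2];
	rc.cur_xchg = mb[3];
	rc.org_xchg = mb[6];
	rc.cur_iocb = mb[7];
	rc.org_iocb = mb[10];

	printf("exchanges: %u/%u, IOCB buffers: %u/%u\n",
	    rc.cur_xchg, rc.org_xchg, rc.cur_iocb, rc.org_iocb);
	return (0);
}
#endif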
9777 
9778 /*
9779  * ql_get_temperature
9780  *	Get ASIC temperature data
9781  *
9782  * Input:
9783  *	ha:	adapter state pointer.
9784  *	cmd:	EXT_IOCTL cmd struct pointer.
9785  *	mode:	flags
9786  *
9787  * Returns:
9788  *	None, request status indicated in cmd->Status.
9789  *
9790  * Context:
9791  *	Kernel context.
9792  */
9793 static void
9794 ql_get_temperature(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9795 {
9796 	ql_mbx_data_t	mr;
9797 	int		rval = 0;
9798 	EXT_BOARD_TEMP	board_temp = {0};
9799 
9800 	QL_PRINT_9(ha, "started\n");
9801 
9802 	if (!(ha->fw_ext_attributes & TEMP_SUPPORT_ISP)) {
9803 		EL(ha, "invalid request for HBA\n");
9804 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
9805 		cmd->ResponseLen = 0;
9806 		return;
9807 	}
9808 
9809 	if (cmd->ResponseLen < sizeof (EXT_BOARD_TEMP)) {
9810 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9811 		cmd->DetailStatus = sizeof (EXT_BOARD_TEMP);
9812 		EL(ha, "failed, ResponseLen < EXT_BOARD_TEMP, "
9813 		    "Len=%xh \n", cmd->ResponseLen);
9814 		cmd->ResponseLen = 0;
9815 		return;
9816 	}
9817 
9818 	switch (cmd->SubCode) {
9819 	case EXT_SC_GET_BOARD_TEMP:
9820 		rval = ql_get_temp(ha, &mr);
9821 		if (rval != QL_SUCCESS) {
9822 			/* error */
9823 			EL(ha, "failed, get_temperature_mbx=%xh\n", rval);
9824 			cmd->Status = EXT_STATUS_ERR;
9825 			cmd->ResponseLen = 0;
9826 			break;
9827 		}
9828 		board_temp.IntTemp = mr.mb[1];
9829 
9830 		rval = ddi_copyout((void *)&board_temp,
9831 		    (void *)(uintptr_t)(cmd->ResponseAdr),
9832 		    sizeof (EXT_BOARD_TEMP), mode);
9833 		if (rval != 0) {
9834 			cmd->Status = EXT_STATUS_COPY_ERR;
9835 			cmd->ResponseLen = 0;
9836 			EL(ha, "failed, ddi_copyout\n");
9837 		} else {
9838 			cmd->ResponseLen = sizeof (EXT_BOARD_TEMP);
9839 		}
9840 		break;
9841 	default:
9842 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
9843 		cmd->Status = EXT_STATUS_ERR;
9844 		cmd->ResponseLen = 0;
9845 		break;
9846 	}
9847 
9848 	QL_PRINT_9(ha, "done\n");
9849 }
9850 
9851 /*
9852  * ql_dump_cmd
9853  *	Performs all EXT_CC_DUMP_OS functions.
9854  *
9855  * Input:
9856  *	ha:	adapter state pointer.
9857  *	cmd:	Local EXT_IOCTL cmd struct pointer.
9858  *
9859  * Returns:
9860  *	None, request status indicated in cmd->Status.
9861  *
9862  * Context:
9863  *	Kernel context.
9864  */
9865 static void
9866 ql_dump_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9867 {
9868 	caddr_t		dump;
9869 	uint32_t	sdm_valid_dump = 0;
9870 	int		rval = 0;
9871 
9872 	QL_PRINT_9(ha, "started\n");
9873 
9874 	if (ha->ql_dump_state & QL_DUMP_VALID &&
9875 	    !(ha->ql_dump_state & QL_DUMP_UPLOADED) &&
9876 	    ha->ql_dump_state != 0) {
9877 		sdm_valid_dump = 1;
9878 	} else {
9879 		EL(ha, "dump does not exist for instance %d (%x, %p)\n",
9880 		    ha->instance, ha->ql_dump_state, ha->ql_dump_ptr);
9881 	}
9882 
9883 	cmd->Status = EXT_STATUS_OK;
9884 	cmd->DetailStatus = 0;
9885 
9886 	switch (cmd->SubCode) {
9887 	case EXT_SC_DUMP_SIZE:
9888 		cmd->ResponseLen = 0;
9889 		if (sdm_valid_dump) {
9890 			cmd->DetailStatus = ha->risc_dump_size;
9891 		}
9892 		break;
9893 	case EXT_SC_DUMP_READ:
9894 		if (!sdm_valid_dump) {
9895 			cmd->Status = EXT_STATUS_INVALID_REQUEST;
9896 			cmd->ResponseLen = 0;
9897 			break;
9898 		}
9899 
9900 		if (cmd->ResponseLen < ha->risc_dump_size) {
9901 			cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9902 			cmd->DetailStatus = ha->risc_dump_size;
9903 			EL(ha, "failed, ResponseLen < %x, "
9904 			    "Len=%xh\n", ha->risc_dump_size,
9905 			    cmd->ResponseLen);
9906 			break;
9907 		}
9908 
9909 		ADAPTER_STATE_LOCK(ha);
9910 		ha->flags |= DISABLE_NIC_FW_DMP;
9911 		ADAPTER_STATE_UNLOCK(ha);
9912 
9913 		QL_DUMP_LOCK(ha);
9914 
9915 		dump = kmem_zalloc(ha->risc_dump_size, KM_SLEEP);
9916 		cmd->ResponseLen = (uint32_t)ql_ascii_fw_dump(ha, dump);
9917 
9918 		if ((rval = ddi_copyout((void *)dump,
9919 		    (void *)(uintptr_t)(cmd->ResponseAdr), cmd->ResponseLen,
9920 		    mode)) != 0) {
9921 			ha->ql_dump_state &= ~QL_DUMP_UPLOADED;
9922 			EL(ha, "failed, ddi_copyout\n");
9923 			cmd->Status = EXT_STATUS_COPY_ERR;
9924 			cmd->ResponseLen = 0;
9925 		} else {
9926 			ha->ql_dump_state |= QL_DUMP_UPLOADED;
9927 		}
9928 
9929 		kmem_free(dump, ha->risc_dump_size);
9930 
9931 		QL_DUMP_UNLOCK(ha);
9932 
9933 		ADAPTER_STATE_LOCK(ha);
9934 		ha->flags &= ~DISABLE_NIC_FW_DMP;
9935 		ADAPTER_STATE_UNLOCK(ha);
9936 		break;
9937 	case EXT_SC_DUMP_TRIGGER:
9938 		cmd->ResponseLen = 0;
9939 
9940 		ADAPTER_STATE_LOCK(ha);
9941 		ha->flags |= DISABLE_NIC_FW_DMP;
9942 		ADAPTER_STATE_UNLOCK(ha);
9943 
9944 		if (sdm_valid_dump) {
9945 			cmd->Status = EXT_STATUS_INVALID_REQUEST;
9946 			EL(ha, "Existing dump file needs to be retrieved.\n");
9947 		} else {
9948 			rval = ql_dump_firmware(ha);
9949 
9950 			if (rval != QL_SUCCESS && rval != QL_DATA_EXISTS) {
9951 				cmd->Status = EXT_STATUS_ERR;
9952 			}
9953 		}
9954 
9955 		ADAPTER_STATE_LOCK(ha);
9956 		ha->flags &= ~DISABLE_NIC_FW_DMP;
9957 		ADAPTER_STATE_UNLOCK(ha);
9958 		break;
9959 	default:
9960 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
9961 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
9962 		cmd->ResponseLen = 0;
9963 		break;
9964 	}
9965 
9966 	QL_PRINT_9(ha, "done\n");
9967 }
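
/*
 * A sketch (kept under #if 0) of the dump availability test at the top
 * of ql_dump_cmd(): a dump is offered only when it is marked valid and
 * has not already been uploaded.  The flag values are stand-ins for the
 * driver's QL_DUMP_VALID/QL_DUMP_UPLOADED bits.
 */
#if 0
#include <stdio.h>

#define	DUMP_VALID	0x01U
#define	DUMP_UPLOADED	0x02U

static int
dump_available(unsigned int state)
{
	return ((state & DUMP_VALID) && !(state & DUMP_UPLOADED));
}

int
main(void)
{
	printf("no dump:      %d\n", dump_available(0));
	printf("fresh dump:   %d\n", dump_available(DUMP_VALID));
	printf("already read: %d\n",
	    dump_available(DUMP_VALID | DUMP_UPLOADED));
	return (0);
}
#endif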
9968 
9969 /*
9970  * ql_serdes_reg
9971  *	Performs all EXT_CC_SERDES_REG_OP functions.
9972  *
9973  * Input:
9974  *	ha:	adapter state pointer.
9975  *	cmd:	EXT_IOCTL cmd struct pointer.
9976  *	mode:	flags
9977  *
9978  * Returns:
9979  *	None, request status indicated in cmd->Status.
9980  *
9981  * Context:
9982  *	Kernel context.
9983  */
9984 static void
9985 ql_serdes_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9986 {
9987 	ql_mbx_data_t	mr = {0};
9988 	int		rval = 0;
9989 	EXT_SERDES_REG	serdes_reg = {0};
9990 
9991 	QL_PRINT_9(ha, "started\n");
9992 
9993 	/* Check if request valid for HBA */
9994 	if (!(CFG_IST(ha, CFG_SERDES_SUPPORT))) {
9995 		EL(ha, "invalid request for HBA\n");
9996 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
9997 		cmd->ResponseLen = 0;
9998 		return;
9999 	}
10000 
10001 	/* Copy in the request structure. */
10002 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
10003 	    (void *)&serdes_reg, sizeof (EXT_SERDES_REG), mode) != 0) {
10004 		EL(ha, "failed, ddi_copyin\n");
10005 		cmd->Status = EXT_STATUS_COPY_ERR;
10006 		cmd->ResponseLen = 0;
10007 		return;
10008 	}
10009 
10010 	switch (cmd->SubCode) {
10011 	case EXT_SC_WRITE_SERDES_REG:
10012 		mr.mb[1] = serdes_reg.addr;
10013 		mr.mb[2] = LSB(serdes_reg.val);
10014 		mr.mb[3] = 0;
10015 		mr.mb[4] = MSB(serdes_reg.val);
10016 		if ((rval = ql_write_serdes(ha, &mr)) != QL_SUCCESS) {
10017 			/* error */
10018 			EL(ha, "failed, write_serdes_mbx=%xh\n", rval);
10019 			cmd->Status = EXT_STATUS_ERR;
10020 			cmd->ResponseLen = 0;
10021 			break;
10022 		} else {
10023 			cmd->Status = EXT_STATUS_OK;
10024 		}
10025 		break;
10026 	case EXT_SC_READ_SERDES_REG:
10027 		/* Verify the size of response structure. */
10028 		if (cmd->ResponseLen < sizeof (EXT_SERDES_REG)) {
10029 			cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
10030 			cmd->DetailStatus = sizeof (EXT_SERDES_REG);
10031 			EL(ha, "failed, ResponseLen < EXT_SERDES_REG, "
10032 			    "Len=%xh \n", cmd->ResponseLen);
10033 			cmd->ResponseLen = 0;
10034 			break;
10035 		}
10036 		mr.mb[1] = serdes_reg.addr;
10037 		if ((rval = ql_read_serdes(ha, &mr)) != QL_SUCCESS) {
10038 			/* error */
10039 			EL(ha, "failed, read_serdes_mbx=%xh\n", rval);
10040 			cmd->Status = EXT_STATUS_ERR;
10041 			cmd->ResponseLen = 0;
10042 			break;
10043 		}
10044 		serdes_reg.val = CHAR_TO_SHORT(LSB(mr.mb[1]), LSB(mr.mb[2]));
10045 		/* Copy back the response data */
10046 		if (ddi_copyout((void *)&serdes_reg,
10047 		    (void *)(uintptr_t)(cmd->ResponseAdr),
10048 		    sizeof (EXT_SERDES_REG), mode) != 0) {
10049 			cmd->Status = EXT_STATUS_COPY_ERR;
10050 			cmd->ResponseLen = 0;
10051 			EL(ha, "failed, ddi_copyout\n");
10052 		} else {
10053 			cmd->Status = EXT_STATUS_OK;
10054 			cmd->ResponseLen = sizeof (EXT_SERDES_REG);
10055 		}
10056 		break;
10057 	default:
10058 		/* Subcode not supported. */
10059 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
10060 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
10061 		cmd->ResponseLen = 0;
10062 		break;
10063 	}
10064 
10065 	QL_PRINT_9(ha, "done\n");
10066 }
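
/*
 * A sketch (kept under #if 0) of the byte packing used by the serdes
 * register path above: a 16-bit value is split into low/high bytes for
 * the write mailboxes and reassembled from the read mailboxes.  The
 * LSB/MSB/CHAR_TO_SHORT macros are reproduced here with their usual
 * definitions, which is an assumption about the driver headers.
 */
#if 0
#include <stdio.h>

#define	LSB(x)		((unsigned char)(x))
#define	MSB(x)		((unsigned char)((unsigned short)(x) >> 8))
#define	CHAR_TO_SHORT(a, b) \
	((unsigned short)(((unsigned short)(b) << 8) | (a)))

int
main(void)
{
	unsigned short	val = 0xa55a;
	unsigned char	lo = LSB(val);	/* goes into one write mailbox */
	unsigned char	hi = MSB(val);	/* goes into another */

	/* On read, the two returned bytes are stitched back together. */
	unsigned short	back = CHAR_TO_SHORT(lo, hi);

	printf("val=%04x lo=%02x hi=%02x back=%04x\n", val, lo, hi, back);
	return (0);
}
#endif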
10067 
10068 /*
10069  * ql_serdes_reg_ex
10070  *	Performs all EXT_CC_SERDES_REG_OP_EX functions.
10071  *
10072  * Input:
10073  *	ha:	adapter state pointer.
10074  *	cmd:	EXT_IOCTL cmd struct pointer.
10075  *	mode:	flags
10076  *
10077  * Returns:
10078  *	None, request status indicated in cmd->Status.
10079  *
10080  * Context:
10081  *	Kernel context.
10082  */
10083 static void
10084 ql_serdes_reg_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
10085 {
10086 	ql_mbx_data_t		mr = {0};
10087 	int			rval = 0;
10088 	EXT_SERDES_REG_EX	serdes_reg_ex = {0};
10089 
10090 	QL_PRINT_9(ha, "started\n");
10091 
10092 	/* Check if request valid for HBA */
10093 	if (!(CFG_IST(ha, CFG_SERDES_SUPPORT))) {
10094 		EL(ha, "invalid request for HBA\n");
10095 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
10096 		cmd->ResponseLen = 0;
10097 		return;
10098 	}
10099 
10100 	/* Copy in the request structure. */
10101 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
10102 	    (void *)&serdes_reg_ex, sizeof (EXT_SERDES_REG_EX), mode) != 0) {
10103 		EL(ha, "failed, ddi_copyin\n");
10104 		cmd->Status = EXT_STATUS_COPY_ERR;
10105 		cmd->ResponseLen = 0;
10106 		return;
10107 	}
10108 
10109 	switch (cmd->SubCode) {
10110 	case EXT_SC_WRITE_SERDES_REG:
10111 		mr.mb[3] = LSW(serdes_reg_ex.addr);
10112 		mr.mb[4] = MSW(serdes_reg_ex.addr);
10113 		mr.mb[5] = LSW(serdes_reg_ex.val);
10114 		mr.mb[6] = MSW(serdes_reg_ex.val);
10115 		if ((rval = ql_write_serdes(ha, &mr)) != QL_SUCCESS) {
10116 			/* error */
10117 			EL(ha, "failed, write_serdes_mbx=%xh\n", rval);
10118 			cmd->Status = EXT_STATUS_ERR;
10119 			cmd->ResponseLen = 0;
10120 			break;
10121 		} else {
10122 			cmd->Status = EXT_STATUS_OK;
10123 		}
10124 		break;
10125 	case EXT_SC_READ_SERDES_REG:
10126 		/* Verify the size of response structure. */
10127 		if (cmd->ResponseLen < sizeof (EXT_SERDES_REG_EX)) {
10128 			cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
10129 			cmd->DetailStatus = sizeof (EXT_SERDES_REG_EX);
10130 			EL(ha, "failed, ResponseLen < EXT_SERDES_REG_EX, "
10131 			    "Len=%xh\n", cmd->ResponseLen);
10132 			cmd->ResponseLen = 0;
10133 			break;
10134 		}
10135 		mr.mb[3] = LSW(serdes_reg_ex.addr);
10136 		mr.mb[4] = MSW(serdes_reg_ex.addr);
10137 		if ((rval = ql_read_serdes(ha, &mr)) != QL_SUCCESS) {
10138 			/* error */
10139 			EL(ha, "failed, read_serdes_mbx=%xh\n", rval);
10140 			cmd->Status = EXT_STATUS_ERR;
10141 			cmd->ResponseLen = 0;
10142 			break;
10143 		}
10144 		serdes_reg_ex.val = SHORT_TO_LONG(mr.mb[1], mr.mb[2]);
10145 		/* Copy back the response data */
10146 		if (ddi_copyout((void *)&serdes_reg_ex,
10147 		    (void *)(uintptr_t)(cmd->ResponseAdr),
10148 		    sizeof (EXT_SERDES_REG_EX), mode) != 0) {
10149 			cmd->Status = EXT_STATUS_COPY_ERR;
10150 			cmd->ResponseLen = 0;
10151 			EL(ha, "failed, ddi_copyout\n");
10152 		} else {
10153 			cmd->Status = EXT_STATUS_OK;
10154 			cmd->ResponseLen = sizeof (EXT_SERDES_REG_EX);
10155 		}
10156 		break;
10157 	default:
10158 		/* Subcode not supported. */
10159 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
10160 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
10161 		cmd->ResponseLen = 0;
10162 		break;
10163 	}
10164 
10165 	QL_PRINT_9(ha, "done\n");
10166 }
10167 
10168 /*
10169  * ql_els_passthru
10170  *	IOCTL for extended link service passthru command.
10171  *
10172  * Input:
10173  *	ha:	adapter state pointer.
10174  *	cmd:	User space CT arguments pointer.
10175  *	mode:	flags.
10176  *
10177  * Returns:
10178  *	None, request status indicated in cmd->Status.
10179  *
10180  * Context:
10181  *	Kernel context.
10182  */
10183 static void
10184 ql_els_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
10185 {
10186 	ql_mbx_iocb_t		*pkt;
10187 	dma_mem_t		*dma_mem;
10188 	caddr_t			bp, pld;
10189 	uint32_t		pkt_size, pld_byte_cnt, cmd_size, *long_ptr;
10190 	EXT_ELS_PT_REQ		*pt_req;
10191 	boolean_t		local_hba = B_FALSE;
10192 	ql_tgt_t		*tq = NULL;
10193 	port_id_t		tmp_fcid;
10194 	int			rval;
10195 	uint16_t		comp_status;
10196 
10197 	QL_PRINT_9(ha, "started\n");
10198 
10199 	if (DRIVER_SUSPENDED(ha)) {
10200 		EL(ha, "failed, LOOP_NOT_READY\n");
10201 		cmd->Status = EXT_STATUS_BUSY;
10202 		cmd->ResponseLen = 0;
10203 		return;
10204 	}
10205 
10206 	if (cmd->RequestLen < sizeof (EXT_ELS_PT_REQ)) {
10207 		/* parameter error */
10208 		EL(ha, "failed, RequestLen < EXT_ELS_PT_REQ, Len=%xh\n",
10209 		    cmd->RequestLen);
10210 		cmd->Status = EXT_STATUS_INVALID_PARAM;
10211 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
10212 		cmd->ResponseLen = 0;
10213 		return;
10214 	}
10215 
10216 	/* Allocate memory for command. */
10217 	bp = kmem_zalloc(cmd->RequestLen, KM_SLEEP);
10218 
10219 	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
10220 	    bp, cmd->RequestLen, mode) != 0) {
10221 		EL(ha, "failed, ddi_copyin\n");
10222 		kmem_free(bp, cmd->RequestLen);
10223 		cmd->Status = EXT_STATUS_COPY_ERR;
10224 		cmd->ResponseLen = 0;
10225 		return;
10226 	}
10227 	pt_req = (EXT_ELS_PT_REQ *)bp;
10228 
10229 	QL_PRINT_9(ha, "EXT_ELS_PT_REQ\n");
10230 	QL_DUMP_9((uint8_t *)pt_req, 8, sizeof (EXT_ELS_PT_REQ));
10231 
10232 	/* Find loop ID of the device */
10233 	if (pt_req->ValidMask & EXT_DEF_WWPN_VALID) {
10234 		if (bcmp(ha->loginparams.nport_ww_name.raw_wwn, pt_req->WWPN,
10235 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
10236 			local_hba = B_TRUE;
10237 		} else {
10238 			tq = ql_find_port(ha, pt_req->WWPN, QLNT_PORT);
10239 		}
10240 	} else if (pt_req->ValidMask & EXT_DEF_PID_VALID) {
10241 		/*
10242 		 * Copy caller's d_id to tmp space.
10243 		 */
10244 		bcopy(&pt_req->Id[1], tmp_fcid.r.d_id,
10245 		    EXT_DEF_PORTID_SIZE_ACTUAL);
10246 		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
10247 
10248 		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
10249 		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
10250 			local_hba = B_TRUE;
10251 		} else {
10252 			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
10253 			    QLNT_PID);
10254 		}
10255 	} else if (pt_req->ValidMask & EXT_DEF_WWNN_VALID) {
10256 		if (bcmp(ha->loginparams.node_ww_name.raw_wwn, pt_req->WWNN,
10257 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
10258 			local_hba = B_TRUE;
10259 		} else {
10260 			tq = ql_find_port(ha, pt_req->WWNN, QLNT_NODE);
10261 		}
10262 	}
10263 
10264 	if (local_hba == B_TRUE) {
10265 		EL(ha, "failed, els to adapter\n");
10266 		kmem_free(bp, cmd->RequestLen);
10267 		cmd->Status = EXT_STATUS_ERR;
10268 		cmd->ResponseLen = 0;
10269 		return;
10270 	}
10271 
10272 	if (tq == NULL) {
10273 		/* no matching device */
10274 		EL(ha, "failed, device not found\n");
10275 		kmem_free(bp, cmd->RequestLen);
10276 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
10277 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
10278 		cmd->ResponseLen = 0;
10279 		return;
10280 	}
10281 
10282 	/* Allocate a DMA Memory Descriptor */
10283 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
10284 	if (dma_mem == NULL) {
10285 		EL(ha, "failed, kmem_zalloc\n");
10286 		kmem_free(bp, cmd->RequestLen);
10287 		cmd->Status = EXT_STATUS_NO_MEMORY;
10288 		cmd->ResponseLen = 0;
10289 		return;
10290 	}
10291 	/* Determine maximum buffer size. */
10292 	cmd_size = cmd->RequestLen - sizeof (EXT_ELS_PT_REQ);
10293 	pld_byte_cnt = cmd_size < cmd->ResponseLen ? cmd->ResponseLen :
10294 	    cmd_size;
10295 	pld = (caddr_t)(bp + sizeof (EXT_ELS_PT_REQ));
10296 
10297 	/* Allocate command block. */
10298 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t));
10299 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
10300 	if (pkt == NULL) {
10301 		EL(ha, "failed, kmem_zalloc\n");
10302 		kmem_free(dma_mem, sizeof (dma_mem_t));
10303 		kmem_free(bp, cmd->RequestLen);
10304 		cmd->Status = EXT_STATUS_NO_MEMORY;
10305 		cmd->ResponseLen = 0;
10306 		return;
10307 	}
10308 
10309 	/* Get DMA memory for the payload */
10310 	if (ql_get_dma_mem(ha, dma_mem, pld_byte_cnt, LITTLE_ENDIAN_DMA,
10311 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
10312 		cmn_err(CE_WARN, "%sDMA memory alloc failed", QL_NAME);
10313 		kmem_free(pkt, pkt_size);
10314 		kmem_free(dma_mem, sizeof (dma_mem_t));
10315 		kmem_free(bp, cmd->RequestLen);
10316 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
10317 		cmd->ResponseLen = 0;
10318 		return;
10319 	}
10320 
10321 	/* Copy out going payload data to IOCB DMA buffer. */
10322 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
10323 	    (uint8_t *)dma_mem->bp, cmd_size, DDI_DEV_AUTOINCR);
10324 
10325 	/* Sync IOCB DMA buffer. */
10326 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, cmd_size,
10327 	    DDI_DMA_SYNC_FORDEV);
10328 
10329 	/*
10330 	 * Setup IOCB
10331 	 */
10332 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
10333 		pkt->els.entry_type = ELS_PASSTHRU_TYPE;
10334 		pkt->els.entry_count = 1;
10335 
10336 		/* Set loop ID */
10337 		pkt->els.n_port_hdl = tq->loop_id;
10338 
10339 		/* Set cmd/response data segment counts. */
10340 		pkt->els.xmt_dseg_count = LE_16(1);
10341 		pkt->els.vp_index = ha->vp_index;
10342 		pkt->els.rcv_dseg_count = LE_16(1);
10343 
10344 		pkt->els.els_cmd_opcode = pld[0];
10345 
10346 		pkt->els.d_id_7_0 = tq->d_id.b.al_pa;
10347 		pkt->els.d_id_15_8 = tq->d_id.b.area;
10348 		pkt->els.d_id_23_16 = tq->d_id.b.domain;
10349 
10350 		pkt->els.s_id_7_0 = ha->d_id.b.al_pa;
10351 		pkt->els.s_id_15_8 = ha->d_id.b.area;
10352 		pkt->els.s_id_23_16 = ha->d_id.b.domain;
10353 
10354 		/* Load rsp byte count. */
10355 		pkt->els.rcv_payld_data_bcnt = LE_32(cmd->ResponseLen);
10356 
10357 		/* Load cmd byte count. */
10358 		pkt->els.xmt_payld_data_bcnt = LE_32(cmd_size);
10359 
10360 		long_ptr = (uint32_t *)&pkt->els.dseg;
10361 
10362 		/* Load ELS command entry data segments. */
10363 		*long_ptr++ = (uint32_t)
10364 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
10365 		*long_ptr++ = (uint32_t)
10366 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
10367 		*long_ptr++ = LE_32(cmd_size);
10368 
10369 		/* Load ELS response entry data segments. */
10370 		*long_ptr++ = (uint32_t)
10371 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
10372 		*long_ptr++ = (uint32_t)
10373 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
10374 		*long_ptr = LE_32(cmd->ResponseLen);
10375 
10376 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
10377 		    sizeof (ql_mbx_iocb_t));
10378 
10379 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
10380 		if (rval == QL_SUCCESS && comp_status == CS_DATA_UNDERRUN) {
10381 			comp_status = CS_COMPLETE;
10382 		}
10383 		if (rval != QL_SUCCESS ||
10384 		    (pkt->sts24.entry_status & 0x3c) != 0 ||
10385 		    comp_status != CS_COMPLETE) {
10386 			EL(ha, "failed, I/O timeout, cs=%xh, es=%xh, "
10387 			    "rval=%xh\n",
10388 			    comp_status, pkt->sts24.entry_status, rval);
10389 			ql_free_dma_resource(ha, dma_mem);
10390 			kmem_free(pkt, pkt_size);
10391 			kmem_free(dma_mem, sizeof (dma_mem_t));
10392 			kmem_free(bp, cmd->RequestLen);
10393 			cmd->Status = EXT_STATUS_ERR;
10394 			cmd->ResponseLen = 0;
10395 			return;
10396 		}
10397 	} else {
10398 		pkt->ms.entry_type = MS_TYPE;
10399 		pkt->ms.entry_count = 1;
10400 
10401 		/* Set loop ID */
10402 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
10403 			pkt->ms.loop_id_l = LSB(tq->loop_id);
10404 			pkt->ms.loop_id_h = MSB(tq->loop_id);
10405 		} else {
10406 			pkt->ms.loop_id_h = LSB(tq->loop_id);
10407 		}
10408 
10409 		pkt->ms.control_flags_h = CF_ELS_PASSTHROUGH;
10410 
10411 		/* Set ISP command timeout. */
10412 		pkt->ms.timeout = LE_16(120);
10413 
10414 		/* Set data segment counts. */
10415 		pkt->ms.cmd_dseg_count_l = 1;
10416 		pkt->ms.total_dseg_count = LE_16(2);
10417 
10418 		/* Response total byte count. */
10419 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
10420 		pkt->ms.dseg[1].length = LE_32(cmd->ResponseLen);
10421 
10422 		/* Command total byte count. */
10423 		pkt->ms.cmd_byte_count = LE_32(cmd_size);
10424 		pkt->ms.dseg[0].length = LE_32(cmd_size);
10425 
10426 		/* Load command/response data segments. */
10427 		pkt->ms.dseg[0].address[0] = (uint32_t)
10428 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
10429 		pkt->ms.dseg[0].address[1] = (uint32_t)
10430 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
10431 		pkt->ms.dseg[1].address[0] = (uint32_t)
10432 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
10433 		pkt->ms.dseg[1].address[1] = (uint32_t)
10434 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
10435 
10436 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
10437 		    sizeof (ql_mbx_iocb_t));
10438 
10439 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
10440 		if (rval == QL_SUCCESS && comp_status == CS_DATA_UNDERRUN) {
10441 			comp_status = CS_COMPLETE;
10442 		}
10443 		if (rval != QL_SUCCESS ||
10444 		    (pkt->sts.entry_status & 0x7e) != 0 ||
10445 		    comp_status != CS_COMPLETE) {
10446 			EL(ha, "failed, I/O timeout, cs=%xh, es=%xh, "
10447 			    "rval=%xh\n",
10448 			    comp_status, pkt->sts.entry_status, rval);
10449 			ql_free_dma_resource(ha, dma_mem);
10450 			kmem_free(pkt, pkt_size);
10451 			kmem_free(dma_mem, sizeof (dma_mem_t));
10452 			kmem_free(bp, cmd->RequestLen);
10453 			cmd->Status = EXT_STATUS_ERR;
10454 			cmd->ResponseLen = 0;
10455 			return;
10456 		}
10457 	}
10458 
10459 	/* Sync payload DMA buffer. */
10460 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, cmd->ResponseLen,
10461 	    DDI_DMA_SYNC_FORKERNEL);
10462 
10463 	if (ql_send_buffer_data(dma_mem->bp,
10464 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
10465 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
10466 		cmd->Status = EXT_STATUS_COPY_ERR;
10467 		EL(ha, "failed, ddi_copyout\n");
10468 	} else {
10469 		QL_PRINT_9(ha, "els_rsp\n");
10470 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
10471 		cmd->Status = EXT_STATUS_OK;
10472 		QL_PRINT_9(ha, "done\n");
10473 	}
10474 
10475 	ql_free_dma_resource(ha, dma_mem);
10476 	kmem_free(pkt, pkt_size);
10477 	kmem_free(dma_mem, sizeof (dma_mem_t));
10478 	kmem_free(bp, cmd->RequestLen);
10479 }
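
/*
 * A sketch (kept under #if 0) of the 24-bit FC port-ID handling in
 * ql_els_passthru(): the ID is three bytes (domain, area, al_pa) and the
 * ELS IOCB wants the individual components.  The union below is a
 * simplified stand-in for the driver's port_id_t and assumes a
 * little-endian host.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

typedef union port_id {
	uint32_t	b24;		/* 0x00DDAAPP on little-endian */
	struct {
		uint8_t	al_pa;		/* low byte */
		uint8_t	area;
		uint8_t	domain;
		uint8_t	rsvd;
	} b;
} port_id_t;

int
main(void)
{
	port_id_t	d_id;

	d_id.b24 = 0;
	d_id.b.domain = 0x01;		/* switch domain */
	d_id.b.area = 0x02;		/* area (port group) */
	d_id.b.al_pa = 0xef;		/* arbitrated-loop physical address */

	printf("d_id=%06x domain=%02x area=%02x al_pa=%02x\n",
	    (unsigned int)(d_id.b24 & 0xffffff),
	    d_id.b.domain, d_id.b.area, d_id.b.al_pa);
	return (0);
}
#endif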
10480 
10481 /*
10482  * ql_flash_update_caps
10483  *	IOCTL for flash update capabilities command.
10484  *
10485  * Input:
10486  *	ha:	adapter state pointer.
10487  *	cmd:	User space CT arguments pointer.
10488  *	mode:	flags.
10489  *
10490  * Returns:
10491  *	None, request status indicated in cmd->Status.
10492  *
10493  * Context:
10494  *	Kernel context.
10495  */
10496 static void
10497 ql_flash_update_caps(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
10498 {
10499 	int			rval;
10500 	uint64_t		cb;
10501 	EXT_FLASH_UPDATE_CAPS	caps = {0};
10502 
10503 	QL_PRINT_9(ha, "started\n");
10504 
10505 	cb = LONG_TO_LLONG(ha->fw_attributes, ha->fw_ext_attributes);
10506 
10507 	switch (cmd->SubCode) {
10508 	case EXT_SC_GET_FLASH_UPDATE_CAPS:
10509 		if (cmd->ResponseLen < sizeof (EXT_FLASH_UPDATE_CAPS)) {
10510 			cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
10511 			cmd->DetailStatus = sizeof (EXT_FLASH_UPDATE_CAPS);
10512 			EL(ha, "failed, ResponseLen < 0x%x, Len=0x%x\n",
10513 			    (int)sizeof (EXT_FLASH_UPDATE_CAPS), cmd->ResponseLen);
10514 			cmd->ResponseLen = 0;
10515 			return;
10516 		}
10517 		caps.Capabilities = cb;
10518 		caps.OutageDuration = 300;	/* seconds */
10519 
10520 		rval = ddi_copyout((void *)&caps,
10521 		    (void *)(uintptr_t)(cmd->ResponseAdr),
10522 		    sizeof (EXT_FLASH_UPDATE_CAPS), mode);
10523 		if (rval != 0) {
10524 			cmd->Status = EXT_STATUS_COPY_ERR;
10525 			cmd->ResponseLen = 0;
10526 			EL(ha, "failed, ddi_copyout\n");
10527 		} else {
10528 			cmd->ResponseLen = sizeof (EXT_FLASH_UPDATE_CAPS);
10529 		}
10530 		break;
10531 	case EXT_SC_SET_FLASH_UPDATE_CAPS:
10532 		if (cmd->RequestLen < sizeof (EXT_FLASH_UPDATE_CAPS)) {
10533 			/* parameter error */
10534 			EL(ha, "failed, RequestLen < EXT_FLASH_UPDATE_CAPS, "
10535 			    "Len=%xh\n", cmd->RequestLen);
10536 			cmd->Status = EXT_STATUS_INVALID_PARAM;
10537 			cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
10538 			cmd->ResponseLen = 0;
10539 			return;
10540 		}
10541 
10542 		/* Copy in the request structure. */
10543 		if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
10544 		    (void *)&caps, sizeof (EXT_FLASH_UPDATE_CAPS), mode) != 0) {
10545 			EL(ha, "failed, ddi_copyin\n");
10546 			cmd->Status = EXT_STATUS_COPY_ERR;
10547 			cmd->ResponseLen = 0;
10548 			return;
10549 		}
10550 
10551 		if (cb != caps.Capabilities || caps.OutageDuration < 300) {
10552 			cmd->Status = EXT_STATUS_ERR;
10553 			cmd->ResponseLen = 0;
10554 		}
10555 		break;
10556 	default:
10557 		/* Subcode not supported. */
10558 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
10559 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
10560 		cmd->ResponseLen = 0;
10561 		break;
10562 	}
10563 
10564 	QL_PRINT_9(ha, "done\n");
10565 }
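
/*
 * A sketch (kept under #if 0) of the capability word built in
 * ql_flash_update_caps(): the two 32-bit firmware attribute words are
 * concatenated into one 64-bit value.  The low/high layout of
 * LONG_TO_LLONG shown here is an assumption about the driver's macro.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	LONG_TO_LLONG(lo, hi) \
	((uint64_t)(uint32_t)(lo) | ((uint64_t)(uint32_t)(hi) << 32))

int
main(void)
{
	uint32_t	fw_attributes = 0x0000b0d2;	/* sample values */
	uint32_t	fw_ext_attributes = 0x00000014;
	uint64_t	caps = LONG_TO_LLONG(fw_attributes, fw_ext_attributes);

	printf("capabilities = %016llx\n", (unsigned long long)caps);
	return (0);
}
#endif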
10566 
10567 /*
10568  * ql_get_bbcr_data
10569  *	IOCTL for get buffer to buffer credits command.
10570  *
10571  * Input:
10572  *	ha:	adapter state pointer.
10573  *	cmd:	User space CT arguments pointer.
10574  *	mode:	flags.
10575  *
10576  * Returns:
10577  *	None, request status indicated in cmd->Status.
10578  *
10579  * Context:
10580  *	Kernel context.
10581  */
10582 static void
10583 ql_get_bbcr_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
10584 {
10585 	int		rval;
10586 	ql_mbx_data_t	mr;
10587 	EXT_BBCR_DATA	bb = {0};
10588 
10589 	QL_PRINT_9(ha, "started\n");
10590 
10591 	if (cmd->ResponseLen < sizeof (EXT_BBCR_DATA)) {
10592 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
10593 		cmd->DetailStatus = sizeof (EXT_BBCR_DATA);
10594 		EL(ha, "failed, ResponseLen < 0x%x, Len=0x%x\n",
10595 		    (int)sizeof (EXT_BBCR_DATA), cmd->ResponseLen);
10596 		cmd->ResponseLen = 0;
10597 		return;
10598 	}
10599 
10600 	if (!(CFG_IST(ha, CFG_BBCR_SUPPORT))) {
10601 		EL(ha, "invalid request for HBA\n");
10602 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
10603 		cmd->ResponseLen = 0;
10604 		return;
10605 	}
10606 	if (ha->task_daemon_flags & LOOP_DOWN) {
10607 		rval = ql_get_adapter_id(ha, &mr);
10608 		ha->bbcr_initial = LSB(mr.mb[15]);
10609 		ha->bbcr_runtime = MSB(mr.mb[15]);
10610 		bb.ConfiguredBBSCN = ha->bbcr_initial & BBCR_INITIAL_MASK;
10611 		bb.NegotiatedBBSCN = ha->bbcr_runtime & BBCR_RUNTIME_MASK;
10612 		bb.Status = EXT_DEF_BBCR_STATUS_UNKNOWN;
10613 		bb.State = EXT_DEF_BBCR_STATE_OFFLINE;
10614 		if (rval == 0x4005) {
10615 			bb.mbx1 = mr.mb[1];
10616 		}
10617 	} else {
10618 		bb.ConfiguredBBSCN = ha->bbcr_initial & BBCR_INITIAL_MASK;
10619 		bb.NegotiatedBBSCN = ha->bbcr_runtime & BBCR_RUNTIME_MASK;
10620 
10621 		if (bb.ConfiguredBBSCN) {
10622 			bb.Status = EXT_DEF_BBCR_STATUS_ENABLED;
10623 			if (bb.NegotiatedBBSCN &&
10624 			    !(ha->bbcr_runtime & BBCR_RUNTIME_REJECT)) {
10625 				bb.State = EXT_DEF_BBCR_STATE_ONLINE;
10626 			} else {
10627 				bb.State = EXT_DEF_BBCR_STATE_OFFLINE;
10628 				if (ha->bbcr_runtime & BBCR_RUNTIME_REJECT) {
10629 					bb.OfflineReasonCode =
10630 					    EXT_DEF_BBCR_REASON_LOGIN_REJECT;
10631 				} else {
10632 					bb.OfflineReasonCode =
10633 					    EXT_DEF_BBCR_REASON_SWITCH;
10634 				}
10635 			}
10636 		} else {
10637 			bb.Status = EXT_DEF_BBCR_STATUS_DISABLED;
10638 		}
10639 	}
10640 
10641 	rval = ddi_copyout((void *)&bb, (void *)(uintptr_t)(cmd->ResponseAdr),
10642 	    sizeof (EXT_BBCR_DATA), mode);
10643 	if (rval != 0) {
10644 		cmd->Status = EXT_STATUS_COPY_ERR;
10645 		cmd->ResponseLen = 0;
10646 		EL(ha, "failed, ddi_copyout\n");
10647 	} else {
10648 		cmd->ResponseLen = sizeof (EXT_BBCR_DATA);
10649 	}
10650 
10651 	QL_PRINT_9(ha, "done\n");
10652 }
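
/*
 * A sketch (kept under #if 0) of the BB credit recovery decode in
 * ql_get_bbcr_data(): mailbox register 15 carries the initial value in
 * its low byte and the runtime value in its high byte, each masked
 * before being reported.  The mask and reject-bit values are
 * placeholders for the driver's BBCR_* definitions.
 */
#if 0
#include <stdio.h>

#define	BBCR_MASK	0x0fU		/* assumed BBCR_*_MASK */
#define	BBCR_REJECT	0x80U		/* assumed BBCR_RUNTIME_REJECT bit */

int
main(void)
{
	unsigned short	mb15 = 0x040e;	/* sample mailbox 15 contents */
	unsigned char	initial = (unsigned char)(mb15 & 0xff);
	unsigned char	runtime = (unsigned char)(mb15 >> 8);

	printf("configured BBSCN = %u\n", initial & BBCR_MASK);
	printf("negotiated BBSCN = %u\n", runtime & BBCR_MASK);
	printf("login rejected   = %s\n",
	    (runtime & BBCR_REJECT) ? "yes" : "no");
	return (0);
}
#endif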
10653 
10654 /*
10655  * ql_get_priv_stats
10656  *	Performs EXT_SC_GET_PRIV_STATS subcommand. of EXT_CC_GET_DATA.
10657  *
10658  * Input:
10659  *	ha:	adapter state pointer.
10660  *	cmd:	Local EXT_IOCTL cmd struct pointer.
10661  *	mode:	flags.
10662  *
10663  * Returns:
10664  *	None, request status indicated in cmd->Status.
10665  *
10666  * Context:
10667  *	Kernel context.
10668  */
10669 static void
10670 ql_get_priv_stats(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
10671 {
10672 	uint8_t	*ls;
10673 	int	rval;
10674 	int	retry = 10;
10675 
10676 	QL_PRINT_9(ha, "started\n");
10677 
10678 	while (ha->task_daemon_flags & (DRIVER_STALL | ABORT_ISP_ACTIVE |
10679 	    LOOP_RESYNC_ACTIVE)) {
10680 		ql_delay(ha, 10000000);	/* 10 second delay */
10681 
10682 		retry--;
10683 
10684 		if (retry == 0) { /* effectively 100 seconds */
10685 			EL(ha, "failed, LOOP_NOT_READY\n");
10686 			cmd->Status = EXT_STATUS_BUSY;
10687 			cmd->ResponseLen = 0;
10688 			return;
10689 		}
10690 	}
10691 
10692 	/* Allocate memory for command. */
10693 	ls = kmem_zalloc(cmd->ResponseLen, KM_SLEEP);
10694 
10695 	/*
10696 	 * I think these are supposed to be port statistics; the
10697 	 * loop ID or port ID should be in cmd->Instance.
10698 	 */
10699 	rval = ql_get_status_counts(ha,
10700 	    ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id,
10701 	    cmd->ResponseLen, (caddr_t)ls, 0);
10702 	if (rval != QL_SUCCESS) {
10703 		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
10704 		    ha->loop_id);
10705 		cmd->Status = EXT_STATUS_MAILBOX;
10706 		cmd->DetailStatus = rval;
10707 		cmd->ResponseLen = 0;
10708 	} else {
10709 		rval = ddi_copyout((void *)ls,
10710 		    (void *)(uintptr_t)cmd->ResponseAdr, cmd->ResponseLen,
10711 		    mode);
10712 		if (rval != 0) {
10713 			EL(ha, "failed, ddi_copyout\n");
10714 			cmd->Status = EXT_STATUS_COPY_ERR;
10715 			cmd->ResponseLen = 0;
10716 		}
10717 	}
10718 
10719 	kmem_free(ls, cmd->ResponseLen);
10720 
10721 	QL_PRINT_9(ha, "done\n");
10722 }
10723