xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c (revision 23a1ccea6aac035f084a7a4cdc968687d1b02daf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 #pragma ident	"Copyright 2010 QLogic Corporation; ql_api.c"
29 
30 /*
31  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32  *
33  * ***********************************************************************
34  * *									**
35  * *				NOTICE					**
36  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
37  * *			ALL RIGHTS RESERVED				**
38  * *									**
39  * ***********************************************************************
40  *
41  */
42 
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_init.h>
47 #include <ql_iocb.h>
48 #include <ql_ioctl.h>
49 #include <ql_isr.h>
50 #include <ql_mbx.h>
51 #include <ql_nx.h>
52 #include <ql_xioctl.h>
53 
54 /*
55  * Solaris external defines.
56  */
57 extern pri_t minclsyspri;
58 extern pri_t maxclsyspri;
59 
60 /*
61  * dev_ops functions prototypes
62  */
63 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
64 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
65 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
66 static int ql_power(dev_info_t *, int, int);
67 static int ql_quiesce(dev_info_t *);
68 
69 /*
70  * FCA functions prototypes exported by means of the transport table
71  */
72 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
73     fc_fca_bind_info_t *);
74 static void ql_unbind_port(opaque_t);
75 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
76 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
77 static int ql_els_send(opaque_t, fc_packet_t *);
78 static int ql_get_cap(opaque_t, char *, void *);
79 static int ql_set_cap(opaque_t, char *, void *);
80 static int ql_getmap(opaque_t, fc_lilpmap_t *);
81 static int ql_transport(opaque_t, fc_packet_t *);
82 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
83 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
84 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
85 static int ql_abort(opaque_t, fc_packet_t *, int);
86 static int ql_reset(opaque_t, uint32_t);
87 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
88 static opaque_t ql_get_device(opaque_t, fc_portid_t);
89 
90 /*
91  * FCA Driver Support Function Prototypes.
92  */
93 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
94 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
95     ql_srb_t *);
96 static void ql_task_daemon(void *);
97 static void ql_task_thread(ql_adapter_state_t *);
98 static void ql_unsol_callback(ql_srb_t *);
99 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
100     fc_unsol_buf_t *);
101 static void ql_timer(void *);
102 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
103 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
104     uint32_t *, uint32_t *);
105 static void ql_halt(ql_adapter_state_t *, int);
106 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
107 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
108 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_login_port(ql_adapter_state_t *, port_id_t);
122 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
123 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
124 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
125 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
126 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
127 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
128 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
129 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
130     ql_srb_t *);
131 static int ql_kstat_update(kstat_t *, int);
132 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
133 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
134 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
135 static void ql_rst_aen(ql_adapter_state_t *);
136 static void ql_restart_queues(ql_adapter_state_t *);
137 static void ql_abort_queues(ql_adapter_state_t *);
138 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
139 static void ql_idle_check(ql_adapter_state_t *);
140 static int ql_loop_resync(ql_adapter_state_t *);
141 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
142 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
143 static int ql_save_config_regs(dev_info_t *);
144 static int ql_restore_config_regs(dev_info_t *);
145 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
146 static int ql_handle_rscn_update(ql_adapter_state_t *);
147 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
148 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
149 static int ql_dump_firmware(ql_adapter_state_t *);
150 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
151 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
152 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
153 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
154 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
155 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
156 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
157     void *);
158 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
159     uint8_t);
160 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
161 static int ql_suspend_adapter(ql_adapter_state_t *);
162 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
163 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
164 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
165 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
166 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
167 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
168 static int ql_setup_interrupts(ql_adapter_state_t *);
169 static int ql_setup_msi(ql_adapter_state_t *);
170 static int ql_setup_msix(ql_adapter_state_t *);
171 static int ql_setup_fixed(ql_adapter_state_t *);
172 static void ql_release_intr(ql_adapter_state_t *);
173 static void ql_disable_intr(ql_adapter_state_t *);
174 static int ql_legacy_intr(ql_adapter_state_t *);
175 static int ql_init_mutex(ql_adapter_state_t *);
176 static void ql_destroy_mutex(ql_adapter_state_t *);
177 static void ql_iidma(ql_adapter_state_t *);
178 
179 static int ql_n_port_plogi(ql_adapter_state_t *);
180 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
181     els_descriptor_t *);
182 static void ql_isp_els_request_ctor(els_descriptor_t *,
183     els_passthru_entry_t *);
184 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
185 static int ql_wait_for_td_stop(ql_adapter_state_t *);
186 static void ql_process_idc_event(ql_adapter_state_t *);
187 
188 /*
189  * Global data
190  */
191 static uint8_t	ql_enable_pm = 1;
192 static int	ql_flash_sbus_fpga = 0;
193 uint32_t	ql_os_release_level;
194 uint32_t	ql_disable_aif = 0;
195 uint32_t	ql_disable_msi = 0;
196 uint32_t	ql_disable_msix = 0;
197 uint32_t	ql_enable_ets = 0;
198 uint16_t	ql_osc_wait_count = 1000;
199 
200 /* Timer routine variables. */
201 static timeout_id_t	ql_timer_timeout_id = NULL;
202 static clock_t		ql_timer_ticks;
203 
204 /* Soft state head pointer. */
205 void *ql_state = NULL;
206 
207 /* Head adapter link. */
208 ql_head_t ql_hba = {
209 	NULL,
210 	NULL
211 };
212 
213 /* Global hba index */
214 uint32_t ql_gfru_hba_index = 1;
215 
216 /*
217  * Some IP defines and globals
218  */
219 uint32_t	ql_ip_buffer_count = 128;
220 uint32_t	ql_ip_low_water = 10;
221 uint8_t		ql_ip_fast_post_count = 5;
222 static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
223 
224 /* Device AL_PA to Device Head Queue index array. */
225 uint8_t ql_alpa_to_index[] = {
226 	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
227 	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
228 	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
229 	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
230 	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
231 	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
232 	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
233 	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
234 	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
235 	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
236 	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
237 	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
238 	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
239 	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
240 	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
241 	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
242 	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
243 	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
244 	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
245 	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
246 	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
247 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
248 	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
249 	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
250 	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
251 	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
252 };
253 
254 /* Device loop_id to ALPA array. */
255 static uint8_t ql_index_to_alpa[] = {
256 	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
257 	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
258 	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
259 	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
260 	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
261 	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
262 	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
263 	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
264 	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
265 	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
266 	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
267 	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
268 	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
269 };
270 
271 /* 2200 register offsets */
272 static reg_off_t reg_off_2200 = {
273 	0x00,	/* flash_address */
274 	0x02,	/* flash_data */
275 	0x06,	/* ctrl_status */
276 	0x08,	/* ictrl */
277 	0x0a,	/* istatus */
278 	0x0c,	/* semaphore */
279 	0x0e,	/* nvram */
280 	0x18,	/* req_in */
281 	0x18,	/* req_out */
282 	0x1a,	/* resp_in */
283 	0x1a,	/* resp_out */
284 	0xff,	/* risc2host - n/a */
285 	24,	/* Number of mailboxes */
286 
287 	/* Mailbox in register offsets 0 - 23 */
288 	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
289 	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
290 	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
291 	/* 2200 does not have mailbox 24-31 - n/a */
292 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
293 
294 	/* Mailbox out register offsets 0 - 23 */
295 	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
296 	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
297 	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
298 	/* 2200 does not have mailbox 24-31 - n/a */
299 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
300 
301 	0x96,	/* fpm_diag_config */
302 	0xa4,	/* pcr */
303 	0xb0,	/* mctr */
304 	0xb8,	/* fb_cmd */
305 	0xc0,	/* hccr */
306 	0xcc,	/* gpiod */
307 	0xce,	/* gpioe */
308 	0xff,	/* host_to_host_sema - n/a */
309 	0xff,	/* pri_req_in - n/a */
310 	0xff,	/* pri_req_out - n/a */
311 	0xff,	/* atio_req_in - n/a */
312 	0xff,	/* atio_req_out - n/a */
313 	0xff,	/* io_base_addr - n/a */
314 	0xff,	/* nx_host_int - n/a */
315 	0xff	/* nx_risc_int - n/a */
316 };
317 
318 /* 2300 register offsets */
319 static reg_off_t reg_off_2300 = {
320 	0x00,	/* flash_address */
321 	0x02,	/* flash_data */
322 	0x06,	/* ctrl_status */
323 	0x08,	/* ictrl */
324 	0x0a,	/* istatus */
325 	0x0c,	/* semaphore */
326 	0x0e,	/* nvram */
327 	0x10,	/* req_in */
328 	0x12,	/* req_out */
329 	0x14,	/* resp_in */
330 	0x16,	/* resp_out */
331 	0x18,	/* risc2host */
332 	32,	/* Number of mailboxes */
333 
334 	/* Mailbox in register offsets 0 - 31 */
335 	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
336 	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
337 	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
338 	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
339 
340 	/* Mailbox out register offsets 0 - 31 */
341 	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
342 	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
343 	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
344 	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
345 
346 	0x96,	/* fpm_diag_config */
347 	0xa4,	/* pcr */
348 	0xb0,	/* mctr */
349 	0x80,	/* fb_cmd */
350 	0xc0,	/* hccr */
351 	0xcc,	/* gpiod */
352 	0xce,	/* gpioe */
353 	0x1c,	/* host_to_host_sema */
354 	0xff,	/* pri_req_in - n/a */
355 	0xff,	/* pri_req_out - n/a */
356 	0xff,	/* atio_req_in - n/a */
357 	0xff,	/* atio_req_out - n/a */
358 	0xff,	/* io_base_addr - n/a */
359 	0xff,	/* nx_host_int - n/a */
360 	0xff	/* nx_risc_int - n/a */
361 };
362 
363 /* 2400/2500 register offsets */
364 reg_off_t reg_off_2400_2500 = {
365 	0x00,	/* flash_address */
366 	0x04,	/* flash_data */
367 	0x08,	/* ctrl_status */
368 	0x0c,	/* ictrl */
369 	0x10,	/* istatus */
370 	0xff,	/* semaphore - n/a */
371 	0xff,	/* nvram - n/a */
372 	0x1c,	/* req_in */
373 	0x20,	/* req_out */
374 	0x24,	/* resp_in */
375 	0x28,	/* resp_out */
376 	0x44,	/* risc2host */
377 	32,	/* Number of mailboxes */
378 
379 	/* Mailbox in register offsets 0 - 31 */
380 	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
381 	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
382 	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
383 	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
384 
385 	/* Mailbox out register offsets 0 - 31 */
386 	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
387 	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
388 	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
389 	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
390 
391 	0xff,	/* fpm_diag_config  - n/a */
392 	0xff,	/* pcr - n/a */
393 	0xff,	/* mctr - n/a */
394 	0xff,	/* fb_cmd - n/a */
395 	0x48,	/* hccr */
396 	0x4c,	/* gpiod */
397 	0x50,	/* gpioe */
398 	0xff,	/* host_to_host_sema - n/a */
399 	0x2c,	/* pri_req_in */
400 	0x30,	/* pri_req_out */
401 	0x3c,	/* atio_req_in */
402 	0x40,	/* atio_req_out */
403 	0x54,	/* io_base_addr */
404 	0xff,	/* nx_host_int - n/a */
405 	0xff	/* nx_risc_int - n/a */
406 };
407 
408 /* P3 register offsets */
409 static reg_off_t reg_off_8021 = {
410 	0x00,	/* flash_address */
411 	0x04,	/* flash_data */
412 	0x08,	/* ctrl_status */
413 	0x0c,	/* ictrl */
414 	0x10,	/* istatus */
415 	0xff,	/* semaphore - n/a */
416 	0xff,	/* nvram - n/a */
417 	0xff,	/* req_in - n/a */
418 	0x0,	/* req_out */
419 	0x100,	/* resp_in */
420 	0x200,	/* resp_out */
421 	0x500,	/* risc2host */
422 	32,	/* Number of mailboxes */
423 
424 	/* Mailbox in register offsets 0 - 31 */
425 	0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
426 	0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
427 	0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
428 	0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e,
429 
430 	/* Mailbox out register offsets 0 - 31 */
431 	0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
432 	0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
433 	0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
434 	0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,
435 
436 	0xff,	/* fpm_diag_config  - n/a */
437 	0xff,	/* pcr - n/a */
438 	0xff,	/* mctr - n/a */
439 	0xff,	/* fb_cmd - n/a */
440 	0x48,	/* hccr */
441 	0x4c,	/* gpiod */
442 	0x50,	/* gpioe */
443 	0xff,	/* host_to_host_sema - n/a */
444 	0x2c,	/* pri_req_in */
445 	0x30,	/* pri_req_out */
446 	0x3c,	/* atio_req_in */
447 	0x40,	/* atio_req_out */
448 	0x54,	/* io_base_addr */
449 	0x380,	/* nx_host_int */
450 	0x504	/* nx_risc_int */
451 };
452 
453 /* mutex for protecting variables shared by all instances of the driver */
454 kmutex_t ql_global_mutex;
455 kmutex_t ql_global_hw_mutex;
456 kmutex_t ql_global_el_mutex;
457 
458 /* DMA access attribute structure. */
459 static ddi_device_acc_attr_t ql_dev_acc_attr = {
460 	DDI_DEVICE_ATTR_V0,
461 	DDI_STRUCTURE_LE_ACC,
462 	DDI_STRICTORDER_ACC
463 };
464 
465 /* I/O DMA attributes structures. */
466 static ddi_dma_attr_t ql_64bit_io_dma_attr = {
467 	DMA_ATTR_V0,			/* dma_attr_version */
468 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
469 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
470 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
471 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
472 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
473 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
474 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
475 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
476 	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
477 	QL_DMA_GRANULARITY,		/* granularity of device */
478 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
479 };
480 
481 static ddi_dma_attr_t ql_32bit_io_dma_attr = {
482 	DMA_ATTR_V0,			/* dma_attr_version */
483 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
484 	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
485 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
486 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
487 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
488 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
489 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
490 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
491 	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
492 	QL_DMA_GRANULARITY,		/* granularity of device */
493 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
494 };
495 
496 /* Load the default dma attributes */
497 static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
498 static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
499 static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
500 static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
501 static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
502 static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
503 static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
504 static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
505 static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
506 static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
507 static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
508 static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
509 static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
510 static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
511 
512 /* Static declarations of cb_ops entry point functions... */
513 static struct cb_ops ql_cb_ops = {
514 	ql_open,			/* b/c open */
515 	ql_close,			/* b/c close */
516 	nodev,				/* b strategy */
517 	nodev,				/* b print */
518 	nodev,				/* b dump */
519 	nodev,				/* c read */
520 	nodev,				/* c write */
521 	ql_ioctl,			/* c ioctl */
522 	nodev,				/* c devmap */
523 	nodev,				/* c mmap */
524 	nodev,				/* c segmap */
525 	nochpoll,			/* c poll */
526 	nodev,				/* cb_prop_op */
527 	NULL,				/* streamtab  */
528 	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
529 	CB_REV,				/* cb_ops revision */
530 	nodev,				/* c aread */
531 	nodev				/* c awrite */
532 };
533 
534 /* Static declarations of dev_ops entry point functions... */
535 static struct dev_ops ql_devops = {
536 	DEVO_REV,			/* devo_rev */
537 	0,				/* refcnt */
538 	ql_getinfo,			/* getinfo */
539 	nulldev,			/* identify */
540 	nulldev,			/* probe */
541 	ql_attach,			/* attach */
542 	ql_detach,			/* detach */
543 	nodev,				/* reset */
544 	&ql_cb_ops,			/* char/block ops */
545 	NULL,				/* bus operations */
546 	ql_power,			/* power management */
547 	ql_quiesce			/* quiesce device */
548 };
549 
550 /* ELS command code to text converter */
551 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
552 /* Mailbox command code to text converter */
553 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
554 
555 char qlc_driver_version[] = QL_VERSION;
556 
557 /*
558  * Loadable Driver Interface Structures.
559  * Declare and initialize the module configuration section...
560  */
561 static struct modldrv modldrv = {
562 	&mod_driverops,				/* type of module: driver */
563 	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
564 	&ql_devops				/* driver dev_ops */
565 };
566 
567 static struct modlinkage modlinkage = {
568 	MODREV_1,
569 	&modldrv,
570 	NULL
571 };
572 
573 /* ************************************************************************ */
574 /*				Loadable Module Routines.		    */
575 /* ************************************************************************ */
576 
/*
 * _init
 *	Initializes a loadable module. It is called before any other
 *	routine in a loadable module.
 *
 *	Parses the OS minor release number out of utsname.release,
 *	initializes the driver soft-state anchor and global mutexes,
 *	installs the module, and seeds the per-class DMA attribute
 *	templates from the 32/64-bit I/O masters.
 *
 * Returns:
 *	0 = success
 *	EINVAL = unsupported OS release level
 *	non-zero = ddi_soft_state_init()/mod_install() failure code
 *
 * Context:
 *	Kernel context.
 */
int
_init(void)
{
	uint16_t	w16;
	int		rval = 0;

	/*
	 * Get OS major release level: scan utsname.release (e.g. "5.11")
	 * for the first '.', leaving w16 indexing the character after it.
	 */
	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
		if (utsname.release[w16] == '.') {
			w16++;
			break;
		}
	}
	if (w16 < sizeof (utsname.release)) {
		/* Convert the digits after the '.' ("11" -> 11). */
		(void) ql_bstr_to_dec(&utsname.release[w16],
		    &ql_os_release_level, 0);
	} else {
		/* No '.' found; treat as unknown (forces EINVAL below). */
		ql_os_release_level = 0;
	}
	if (ql_os_release_level < 6) {
		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
		    QL_NAME, ql_os_release_level);
		rval = EINVAL;
	}
	if (ql_os_release_level == 6) {
		/*
		 * NOTE(review): limits the DMA counter register to 24 bits
		 * on release level 6 — presumably a Solaris 2.6 DDI
		 * restriction; confirm before changing.
		 */
		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
	}

	if (rval == 0) {
		rval = ddi_soft_state_init(&ql_state,
		    sizeof (ql_adapter_state_t), 0);
	}
	if (rval == 0) {
		/* allow the FC Transport to tweak the dev_ops */
		fc_fca_init(&ql_devops);

		/* Globals must exist before mod_install() exposes us. */
		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
		rval = mod_install(&modlinkage);
		if (rval != 0) {
			/* Unwind everything initialized above. */
			mutex_destroy(&ql_global_hw_mutex);
			mutex_destroy(&ql_global_mutex);
			mutex_destroy(&ql_global_el_mutex);
			ddi_soft_state_fini(&ql_state);
		} else {
			/*
			 * Clone the 32/64-bit master DMA attribute
			 * templates, then override only the s/g list
			 * length per traffic class (FCSM/FCIP/FCP
			 * command/response).
			 */
			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCSM_CMD_SGLLEN;
			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCSM_RSP_SGLLEN;
			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCIP_CMD_SGLLEN;
			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCIP_RSP_SGLLEN;
			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCP_CMD_SGLLEN;
			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCP_RSP_SGLLEN;
		}
	}

	if (rval != 0) {
		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
		    QL_NAME);
	}

	return (rval);
}
678 
679 /*
680  * _fini
681  *	Prepares a module for unloading. It is called when the system
682  *	wants to unload a module. If the module determines that it can
683  *	be unloaded, then _fini() returns the value returned by
684  *	mod_remove(). Upon successful return from _fini() no other
685  *	routine in the module will be called before _init() is called.
686  *
687  * Returns:
688  *	0 = success
689  *
690  * Context:
691  *	Kernel context.
692  */
693 int
694 _fini(void)
695 {
696 	int	rval;
697 
698 	rval = mod_remove(&modlinkage);
699 	if (rval == 0) {
700 		mutex_destroy(&ql_global_hw_mutex);
701 		mutex_destroy(&ql_global_mutex);
702 		mutex_destroy(&ql_global_el_mutex);
703 		ddi_soft_state_fini(&ql_state);
704 	}
705 
706 	return (rval);
707 }
708 
/*
 * _info
 *	Returns information about loadable module.
 *
 * Input:
 *	modinfop = pointer to module information structure.
 *
 * Returns:
 *	Value returned by mod_info().
 *
 * Context:
 *	Kernel context.
 */
int
_info(struct modinfo *modinfop)
{
	/* Straight pass-through to the module framework. */
	return (mod_info(&modlinkage, modinfop));
}
727 
728 /* ************************************************************************ */
729 /*			dev_ops functions				    */
730 /* ************************************************************************ */
731 
732 /*
733  * ql_getinfo
734  *	Returns the pointer associated with arg when cmd is
735  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
736  *	instance number associated with arg when cmd is set
737  *	to DDI_INFO_DEV2INSTANCE.
738  *
739  * Input:
740  *	dip = Do not use.
741  *	cmd = command argument.
742  *	arg = command specific argument.
743  *	resultp = pointer to where request information is stored.
744  *
745  * Returns:
746  *	DDI_SUCCESS or DDI_FAILURE.
747  *
748  * Context:
749  *	Kernel context.
750  */
751 /* ARGSUSED */
752 static int
753 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
754 {
755 	ql_adapter_state_t	*ha;
756 	int			minor;
757 	int			rval = DDI_FAILURE;
758 
759 	minor = (int)(getminor((dev_t)arg));
760 	ha = ddi_get_soft_state(ql_state, minor);
761 	if (ha == NULL) {
762 		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
763 		    getminor((dev_t)arg));
764 		*resultp = NULL;
765 		return (rval);
766 	}
767 
768 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
769 
770 	switch (cmd) {
771 	case DDI_INFO_DEVT2DEVINFO:
772 		*resultp = ha->dip;
773 		rval = DDI_SUCCESS;
774 		break;
775 	case DDI_INFO_DEVT2INSTANCE:
776 		*resultp = (void *)(uintptr_t)(ha->instance);
777 		rval = DDI_SUCCESS;
778 		break;
779 	default:
780 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
781 		rval = DDI_FAILURE;
782 		break;
783 	}
784 
785 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
786 
787 	return (rval);
788 }
789 
790 /*
791  * ql_attach
792  *	Configure and attach an instance of the driver
793  *	for a port.
794  *
795  * Input:
796  *	dip = pointer to device information structure.
797  *	cmd = attach type.
798  *
799  * Returns:
800  *	DDI_SUCCESS or DDI_FAILURE.
801  *
802  * Context:
803  *	Kernel context.
804  */
805 static int
806 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
807 {
808 	off_t			regsize;
809 	uint32_t		size;
810 	int			rval, *ptr;
811 	int			instance;
812 	uint_t			progress = 0;
813 	char			*buf;
814 	ushort_t		caps_ptr, cap;
815 	fc_fca_tran_t		*tran;
816 	ql_adapter_state_t	*ha = NULL;
817 
818 	static char *pmcomps[] = {
819 		NULL,
820 		PM_LEVEL_D3_STR,		/* Device OFF */
821 		PM_LEVEL_D0_STR,		/* Device ON */
822 	};
823 
824 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
825 	    ddi_get_instance(dip), cmd);
826 
827 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
828 
829 	switch (cmd) {
830 	case DDI_ATTACH:
831 		/* first get the instance */
832 		instance = ddi_get_instance(dip);
833 
834 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
835 		    QL_NAME, instance, QL_VERSION);
836 
837 		/* Correct OS version? */
838 		if (ql_os_release_level != 11) {
839 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
840 			    "11", QL_NAME, instance);
841 			goto attach_failed;
842 		}
843 
844 		/* Hardware is installed in a DMA-capable slot? */
845 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
846 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
847 			    instance);
848 			goto attach_failed;
849 		}
850 
851 		/* No support for high-level interrupts */
852 		if (ddi_intr_hilevel(dip, 0) != 0) {
853 			cmn_err(CE_WARN, "%s(%d): High level interrupt"
854 			    " not supported", QL_NAME, instance);
855 			goto attach_failed;
856 		}
857 
858 		/* Allocate our per-device-instance structure */
859 		if (ddi_soft_state_zalloc(ql_state,
860 		    instance) != DDI_SUCCESS) {
861 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
862 			    QL_NAME, instance);
863 			goto attach_failed;
864 		}
865 		progress |= QL_SOFT_STATE_ALLOCED;
866 
867 		ha = ddi_get_soft_state(ql_state, instance);
868 		if (ha == NULL) {
869 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
870 			    QL_NAME, instance);
871 			goto attach_failed;
872 		}
873 		ha->dip = dip;
874 		ha->instance = instance;
875 		ha->hba.base_address = ha;
876 		ha->pha = ha;
877 
878 		if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
879 			cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
880 			    QL_NAME, instance);
881 			goto attach_failed;
882 		}
883 
884 		/* Get extended logging and dump flags. */
885 		ql_common_properties(ha);
886 
887 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
888 		    "sbus") == 0) {
889 			EL(ha, "%s SBUS card detected", QL_NAME);
890 			ha->cfg_flags |= CFG_SBUS_CARD;
891 		}
892 
893 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
894 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
895 
896 		ha->outstanding_cmds = kmem_zalloc(
897 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
898 		    KM_SLEEP);
899 
900 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
901 		    QL_UB_LIMIT, KM_SLEEP);
902 
903 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
904 		    KM_SLEEP);
905 
906 		(void) ddi_pathname(dip, buf);
907 		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
908 		if (ha->devpath == NULL) {
909 			EL(ha, "devpath mem alloc failed\n");
910 		} else {
911 			(void) strcpy(ha->devpath, buf);
912 			EL(ha, "devpath is: %s\n", ha->devpath);
913 		}
914 
915 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
916 			/*
917 			 * For cards where PCI is mapped to sbus e.g. Ivory.
918 			 *
919 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
920 			 *	: 0x100 - 0x3FF PCI IO space for 2200
921 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
922 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
923 			 */
924 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
925 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
926 			    DDI_SUCCESS) {
927 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
928 				    " registers", QL_NAME, instance);
929 				goto attach_failed;
930 			}
931 			if (ddi_regs_map_setup(dip, 1,
932 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
933 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
934 			    DDI_SUCCESS) {
935 				/* We should not fail attach here */
936 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
937 				    QL_NAME, instance);
938 				ha->sbus_fpga_iobase = NULL;
939 			}
940 			progress |= QL_REGS_MAPPED;
941 
942 			/*
943 			 * We should map config space before adding interrupt
944 			 * So that the chip type (2200 or 2300) can be
945 			 * determined before the interrupt routine gets a
946 			 * chance to execute.
947 			 */
948 			if (ddi_regs_map_setup(dip, 0,
949 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
950 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
951 			    DDI_SUCCESS) {
952 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
953 				    "config registers", QL_NAME, instance);
954 				goto attach_failed;
955 			}
956 			progress |= QL_CONFIG_SPACE_SETUP;
957 		} else {
958 			/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
959 			rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
960 			    DDI_PROP_DONTPASS, "reg", &ptr, &size);
961 			if (rval != DDI_PROP_SUCCESS) {
962 				cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
963 				    "address registers", QL_NAME, instance);
964 				goto attach_failed;
965 			} else {
966 				ha->pci_bus_addr = ptr[0];
967 				ha->function_number = (uint8_t)
968 				    (ha->pci_bus_addr >> 8 & 7);
969 				ddi_prop_free(ptr);
970 			}
971 
972 			/*
973 			 * We should map config space before adding interrupt
974 			 * So that the chip type (2200 or 2300) can be
975 			 * determined before the interrupt routine gets a
976 			 * chance to execute.
977 			 */
978 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
979 			    DDI_SUCCESS) {
980 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
981 				    "config space", QL_NAME, instance);
982 				goto attach_failed;
983 			}
984 			progress |= QL_CONFIG_SPACE_SETUP;
985 
986 			/*
987 			 * Setup the ISP2200 registers address mapping to be
988 			 * accessed by this particular driver.
989 			 * 0x0   Configuration Space
990 			 * 0x1   I/O Space
991 			 * 0x2   32-bit Memory Space address
992 			 * 0x3   64-bit Memory Space address
993 			 */
994 			size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
995 			    2 : 1;
996 			if (ddi_dev_regsize(dip, size, &regsize) !=
997 			    DDI_SUCCESS ||
998 			    ddi_regs_map_setup(dip, size, &ha->iobase,
999 			    0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1000 			    DDI_SUCCESS) {
1001 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1002 				    "failed", QL_NAME, instance);
1003 				goto attach_failed;
1004 			}
1005 			progress |= QL_REGS_MAPPED;
1006 
1007 			/*
1008 			 * We need I/O space mappings for 23xx HBAs for
1009 			 * loading flash (FCode). The chip has a bug due to
1010 			 * which loading flash fails through mem space
1011 			 * mappings in PCI-X mode.
1012 			 */
1013 			if (size == 1) {
1014 				ha->iomap_iobase = ha->iobase;
1015 				ha->iomap_dev_handle = ha->dev_handle;
1016 			} else {
1017 				if (ddi_dev_regsize(dip, 1, &regsize) !=
1018 				    DDI_SUCCESS ||
1019 				    ddi_regs_map_setup(dip, 1,
1020 				    &ha->iomap_iobase, 0, regsize,
1021 				    &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1022 				    DDI_SUCCESS) {
1023 					cmn_err(CE_WARN, "%s(%d): regs_map_"
1024 					    "setup(I/O) failed", QL_NAME,
1025 					    instance);
1026 					goto attach_failed;
1027 				}
1028 				progress |= QL_IOMAP_IOBASE_MAPPED;
1029 			}
1030 		}
1031 
1032 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1033 		    PCI_CONF_SUBSYSID);
1034 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1035 		    PCI_CONF_SUBVENID);
1036 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1037 		    PCI_CONF_VENID);
1038 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1039 		    PCI_CONF_DEVID);
1040 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1041 		    PCI_CONF_REVID);
1042 
1043 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1044 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1045 		    ha->subven_id, ha->subsys_id);
1046 
1047 		switch (ha->device_id) {
1048 		case 0x2300:
1049 		case 0x2312:
1050 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
1051 		/*
1052 		 * per marketing, fibre-lite HBAs are not supported
1053 		 * on sparc platforms
1054 		 */
1055 		case 0x6312:
1056 		case 0x6322:
1057 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
1058 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1059 				ha->flags |= FUNCTION_1;
1060 			}
1061 			if (ha->device_id == 0x6322) {
1062 				ha->cfg_flags |= CFG_CTRL_6322;
1063 				ha->fw_class = 0x6322;
1064 				ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1065 			} else {
1066 				ha->cfg_flags |= CFG_CTRL_2300;
1067 				ha->fw_class = 0x2300;
1068 				ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1069 			}
1070 			ha->reg_off = &reg_off_2300;
1071 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1072 				goto attach_failed;
1073 			}
1074 			ha->fcp_cmd = ql_command_iocb;
1075 			ha->ip_cmd = ql_ip_iocb;
1076 			ha->ms_cmd = ql_ms_iocb;
1077 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1078 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1079 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1080 			} else {
1081 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1082 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1083 			}
1084 			break;
1085 
1086 		case 0x2200:
1087 			ha->cfg_flags |= CFG_CTRL_2200;
1088 			ha->reg_off = &reg_off_2200;
1089 			ha->fw_class = 0x2200;
1090 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1091 				goto attach_failed;
1092 			}
1093 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1094 			ha->fcp_cmd = ql_command_iocb;
1095 			ha->ip_cmd = ql_ip_iocb;
1096 			ha->ms_cmd = ql_ms_iocb;
1097 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1098 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1099 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1100 			} else {
1101 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1102 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1103 			}
1104 			break;
1105 
1106 		case 0x2422:
1107 		case 0x2432:
1108 		case 0x5422:
1109 		case 0x5432:
1110 		case 0x8432:
1111 #ifdef __sparc
1112 			/*
1113 			 * Per marketing, the QLA/QLE-2440's (which
1114 			 * also use the 2422 & 2432) are only for the
1115 			 * x86 platform (SMB market).
1116 			 */
1117 			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
1118 			    ha->subsys_id == 0x13e) {
1119 				cmn_err(CE_WARN,
1120 				    "%s(%d): Unsupported HBA ssid: %x",
1121 				    QL_NAME, instance, ha->subsys_id);
1122 				goto attach_failed;
1123 			}
1124 #endif	/* __sparc */
1125 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1126 				ha->flags |= FUNCTION_1;
1127 			}
1128 			ha->cfg_flags |= CFG_CTRL_2422;
1129 			if (ha->device_id == 0x8432) {
1130 				ha->cfg_flags |= CFG_CTRL_MENLO;
1131 			} else {
1132 				ha->flags |= VP_ENABLED;
1133 			}
1134 
1135 			ha->reg_off = &reg_off_2400_2500;
1136 			ha->fw_class = 0x2400;
1137 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1138 				goto attach_failed;
1139 			}
1140 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1141 			ha->fcp_cmd = ql_command_24xx_iocb;
1142 			ha->ip_cmd = ql_ip_24xx_iocb;
1143 			ha->ms_cmd = ql_ms_24xx_iocb;
1144 			ha->els_cmd = ql_els_24xx_iocb;
1145 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1146 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1147 			break;
1148 
1149 		case 0x2522:
1150 		case 0x2532:
1151 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1152 				ha->flags |= FUNCTION_1;
1153 			}
1154 			ha->cfg_flags |= CFG_CTRL_25XX;
1155 			ha->flags |= VP_ENABLED;
1156 			ha->fw_class = 0x2500;
1157 			ha->reg_off = &reg_off_2400_2500;
1158 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1159 				goto attach_failed;
1160 			}
1161 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1162 			ha->fcp_cmd = ql_command_24xx_iocb;
1163 			ha->ip_cmd = ql_ip_24xx_iocb;
1164 			ha->ms_cmd = ql_ms_24xx_iocb;
1165 			ha->els_cmd = ql_els_24xx_iocb;
1166 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1167 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1168 			break;
1169 
1170 		case 0x8001:
1171 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1172 				ha->flags |= FUNCTION_1;
1173 			}
1174 			ha->cfg_flags |= CFG_CTRL_81XX;
1175 			ha->flags |= VP_ENABLED;
1176 			ha->fw_class = 0x8100;
1177 			ha->reg_off = &reg_off_2400_2500;
1178 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1179 				goto attach_failed;
1180 			}
1181 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1182 			ha->fcp_cmd = ql_command_24xx_iocb;
1183 			ha->ip_cmd = ql_ip_24xx_iocb;
1184 			ha->ms_cmd = ql_ms_24xx_iocb;
1185 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1186 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1187 			break;
1188 
1189 		case 0x8021:
1190 			if (ha->function_number & BIT_0) {
1191 				ha->flags |= FUNCTION_1;
1192 			}
1193 			ha->cfg_flags |= CFG_CTRL_8021;
1194 			ha->reg_off = &reg_off_8021;
1195 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1196 			ha->fcp_cmd = ql_command_24xx_iocb;
1197 			ha->ms_cmd = ql_ms_24xx_iocb;
1198 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1199 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1200 
1201 			ha->nx_pcibase = ha->iobase;
1202 			ha->iobase += 0xBC000 + (ha->function_number << 11);
1203 			ha->iomap_iobase += 0xBC000 +
1204 			    (ha->function_number << 11);
1205 
1206 			/* map doorbell */
1207 			if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
1208 			    ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1209 			    0, regsize, &ql_dev_acc_attr, &ha->db_dev_handle) !=
1210 			    DDI_SUCCESS) {
1211 				cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1212 				    "(doorbell) failed", QL_NAME, instance);
1213 				goto attach_failed;
1214 			}
1215 			progress |= QL_DB_IOBASE_MAPPED;
1216 
1217 			ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1218 			    (ha->function_number << 12));
1219 			ha->db_read = ha->nx_pcibase + (512 * 1024) +
1220 			    (ha->function_number * 8);
1221 
1222 			ql_8021_update_crb_int_ptr(ha);
1223 			ql_8021_set_drv_active(ha);
1224 			break;
1225 
1226 		default:
1227 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1228 			    QL_NAME, instance, ha->device_id);
1229 			goto attach_failed;
1230 		}
1231 
1232 		/* Setup hba buffer. */
1233 
1234 		size = CFG_IST(ha, CFG_CTRL_24258081) ?
1235 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1236 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1237 		    RCVBUF_QUEUE_SIZE);
1238 
1239 		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1240 		    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1241 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1242 			    "alloc failed", QL_NAME, instance);
1243 			goto attach_failed;
1244 		}
1245 		progress |= QL_HBA_BUFFER_SETUP;
1246 
1247 		/* Setup buffer pointers. */
1248 		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1249 		    REQUEST_Q_BUFFER_OFFSET;
1250 		ha->request_ring_bp = (struct cmd_entry *)
1251 		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1252 
1253 		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1254 		    RESPONSE_Q_BUFFER_OFFSET;
1255 		ha->response_ring_bp = (struct sts_entry *)
1256 		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1257 
1258 		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1259 		    RCVBUF_Q_BUFFER_OFFSET;
1260 		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1261 		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1262 
1263 		/* Allocate resource for QLogic IOCTL */
1264 		(void) ql_alloc_xioctl_resource(ha);
1265 
1266 		/* Setup interrupts */
1267 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1268 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1269 			    "rval=%xh", QL_NAME, instance, rval);
1270 			goto attach_failed;
1271 		}
1272 
1273 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1274 
1275 		if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1276 			cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1277 			    QL_NAME, instance);
1278 			goto attach_failed;
1279 		}
1280 
1281 		/*
1282 		 * Allocate an N Port information structure
1283 		 * for use when in P2P topology.
1284 		 */
1285 		ha->n_port = (ql_n_port_info_t *)
1286 		    kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1287 		if (ha->n_port == NULL) {
1288 			cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1289 			    QL_NAME, instance);
1290 			goto attach_failed;
1291 		}
1292 
1293 		progress |= QL_N_PORT_INFO_CREATED;
1294 
1295 		/*
1296 		 * Determine support for Power Management
1297 		 */
1298 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1299 
1300 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1301 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1302 			if (cap == PCI_CAP_ID_PM) {
1303 				ha->pm_capable = 1;
1304 				break;
1305 			}
1306 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1307 			    PCI_CAP_NEXT_PTR);
1308 		}
1309 
1310 		if (ha->pm_capable) {
1311 			/*
1312 			 * Enable PM for 2200 based HBAs only.
1313 			 */
1314 			if (ha->device_id != 0x2200) {
1315 				ha->pm_capable = 0;
1316 			}
1317 		}
1318 
1319 		if (ha->pm_capable) {
1320 			ha->pm_capable = ql_enable_pm;
1321 		}
1322 
1323 		if (ha->pm_capable) {
1324 			/*
1325 			 * Initialize power management bookkeeping;
1326 			 * components are created idle.
1327 			 */
1328 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1329 			pmcomps[0] = buf;
1330 
1331 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1332 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1333 			    dip, "pm-components", pmcomps,
1334 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1335 			    DDI_PROP_SUCCESS) {
1336 				cmn_err(CE_WARN, "%s(%d): failed to create"
1337 				    " pm-components property", QL_NAME,
1338 				    instance);
1339 
1340 				/* Initialize adapter. */
1341 				ha->power_level = PM_LEVEL_D0;
1342 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1343 					cmn_err(CE_WARN, "%s(%d): failed to"
1344 					    " initialize adapter", QL_NAME,
1345 					    instance);
1346 					goto attach_failed;
1347 				}
1348 			} else {
1349 				ha->power_level = PM_LEVEL_D3;
1350 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1351 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1352 					cmn_err(CE_WARN, "%s(%d): failed to"
1353 					    " raise power or initialize"
1354 					    " adapter", QL_NAME, instance);
1355 				}
1356 			}
1357 		} else {
1358 			/* Initialize adapter. */
1359 			ha->power_level = PM_LEVEL_D0;
1360 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1361 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1362 				    " adapter", QL_NAME, instance);
1363 			}
1364 		}
1365 
1366 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1367 		    ha->fw_subminor_version == 0) {
1368 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1369 			    QL_NAME, ha->instance);
1370 		} else {
1371 			int	rval;
1372 			char	ver_fmt[256];
1373 
1374 			rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1375 			    "Firmware version %d.%d.%d", ha->fw_major_version,
1376 			    ha->fw_minor_version, ha->fw_subminor_version);
1377 
1378 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
1379 				rval = (int)snprintf(ver_fmt + rval,
1380 				    (size_t)sizeof (ver_fmt),
1381 				    ", MPI fw version %d.%d.%d",
1382 				    ha->mpi_fw_major_version,
1383 				    ha->mpi_fw_minor_version,
1384 				    ha->mpi_fw_subminor_version);
1385 
1386 				if (ha->subsys_id == 0x17B ||
1387 				    ha->subsys_id == 0x17D) {
1388 					(void) snprintf(ver_fmt + rval,
1389 					    (size_t)sizeof (ver_fmt),
1390 					    ", PHY fw version %d.%d.%d",
1391 					    ha->phy_fw_major_version,
1392 					    ha->phy_fw_minor_version,
1393 					    ha->phy_fw_subminor_version);
1394 				}
1395 			}
1396 			cmn_err(CE_NOTE, "!%s(%d): %s",
1397 			    QL_NAME, ha->instance, ver_fmt);
1398 		}
1399 
1400 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1401 		    "controller", KSTAT_TYPE_RAW,
1402 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1403 		if (ha->k_stats == NULL) {
1404 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1405 			    QL_NAME, instance);
1406 			goto attach_failed;
1407 		}
1408 		progress |= QL_KSTAT_CREATED;
1409 
1410 		ha->adapter_stats->version = 1;
1411 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1412 		ha->k_stats->ks_private = ha;
1413 		ha->k_stats->ks_update = ql_kstat_update;
1414 		ha->k_stats->ks_ndata = 1;
1415 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1416 		kstat_install(ha->k_stats);
1417 
1418 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1419 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1420 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1421 			    QL_NAME, instance);
1422 			goto attach_failed;
1423 		}
1424 		progress |= QL_MINOR_NODE_CREATED;
1425 
1426 		/* Allocate a transport structure for this instance */
1427 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1428 		if (tran == NULL) {
1429 			cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1430 			    QL_NAME, instance);
1431 			goto attach_failed;
1432 		}
1433 
1434 		progress |= QL_FCA_TRAN_ALLOCED;
1435 
1436 		/* fill in the structure */
1437 		tran->fca_numports = 1;
1438 		tran->fca_version = FCTL_FCA_MODREV_5;
1439 		if (CFG_IST(ha, CFG_CTRL_2422)) {
1440 			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1441 		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1442 			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1443 		}
1444 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1445 		    tran->fca_perm_pwwn.raw_wwn, 8);
1446 
1447 		EL(ha, "FCA version %d\n", tran->fca_version);
1448 
1449 		/* Specify the amount of space needed in each packet */
1450 		tran->fca_pkt_size = sizeof (ql_srb_t);
1451 
1452 		/* command limits are usually dictated by hardware */
1453 		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1454 
1455 		/* dmaattr are static, set elsewhere. */
1456 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1457 			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1458 			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1459 			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1460 			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1461 			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1462 			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1463 			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1464 			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1465 		} else {
1466 			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1467 			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1468 			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1469 			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1470 			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1471 			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1472 			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1473 			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1474 		}
1475 
1476 		tran->fca_acc_attr = &ql_dev_acc_attr;
1477 		tran->fca_iblock = &(ha->iblock_cookie);
1478 
1479 		/* the remaining values are simply function vectors */
1480 		tran->fca_bind_port = ql_bind_port;
1481 		tran->fca_unbind_port = ql_unbind_port;
1482 		tran->fca_init_pkt = ql_init_pkt;
1483 		tran->fca_un_init_pkt = ql_un_init_pkt;
1484 		tran->fca_els_send = ql_els_send;
1485 		tran->fca_get_cap = ql_get_cap;
1486 		tran->fca_set_cap = ql_set_cap;
1487 		tran->fca_getmap = ql_getmap;
1488 		tran->fca_transport = ql_transport;
1489 		tran->fca_ub_alloc = ql_ub_alloc;
1490 		tran->fca_ub_free = ql_ub_free;
1491 		tran->fca_ub_release = ql_ub_release;
1492 		tran->fca_abort = ql_abort;
1493 		tran->fca_reset = ql_reset;
1494 		tran->fca_port_manage = ql_port_manage;
1495 		tran->fca_get_device = ql_get_device;
1496 
1497 		/* give it to the FC transport */
1498 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1499 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1500 			    instance);
1501 			goto attach_failed;
1502 		}
1503 		progress |= QL_FCA_ATTACH_DONE;
1504 
1505 		/* Stash the structure so it can be freed at detach */
1506 		ha->tran = tran;
1507 
1508 		/* Acquire global state lock. */
1509 		GLOBAL_STATE_LOCK();
1510 
1511 		/* Add adapter structure to link list. */
1512 		ql_add_link_b(&ql_hba, &ha->hba);
1513 
1514 		/* Start one second driver timer. */
1515 		if (ql_timer_timeout_id == NULL) {
1516 			ql_timer_ticks = drv_usectohz(1000000);
1517 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1518 			    ql_timer_ticks);
1519 		}
1520 
1521 		/* Release global state lock. */
1522 		GLOBAL_STATE_UNLOCK();
1523 
1524 		/* Determine and populate HBA fru info */
1525 		ql_setup_fruinfo(ha);
1526 
1527 		/* Setup task_daemon thread. */
1528 		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1529 		    0, &p0, TS_RUN, minclsyspri);
1530 
1531 		progress |= QL_TASK_DAEMON_STARTED;
1532 
1533 		ddi_report_dev(dip);
1534 
1535 		/* Disable link reset in panic path */
1536 		ha->lip_on_panic = 1;
1537 
1538 		rval = DDI_SUCCESS;
1539 		break;
1540 
1541 attach_failed:
1542 		if (progress & QL_FCA_ATTACH_DONE) {
1543 			(void) fc_fca_detach(dip);
1544 			progress &= ~QL_FCA_ATTACH_DONE;
1545 		}
1546 
1547 		if (progress & QL_FCA_TRAN_ALLOCED) {
1548 			kmem_free(tran, sizeof (fc_fca_tran_t));
1549 			progress &= ~QL_FCA_TRAN_ALLOCED;
1550 		}
1551 
1552 		if (progress & QL_MINOR_NODE_CREATED) {
1553 			ddi_remove_minor_node(dip, "devctl");
1554 			progress &= ~QL_MINOR_NODE_CREATED;
1555 		}
1556 
1557 		if (progress & QL_KSTAT_CREATED) {
1558 			kstat_delete(ha->k_stats);
1559 			progress &= ~QL_KSTAT_CREATED;
1560 		}
1561 
1562 		if (progress & QL_N_PORT_INFO_CREATED) {
1563 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1564 			progress &= ~QL_N_PORT_INFO_CREATED;
1565 		}
1566 
1567 		if (progress & QL_TASK_DAEMON_STARTED) {
1568 			TASK_DAEMON_LOCK(ha);
1569 
1570 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1571 
1572 			cv_signal(&ha->cv_task_daemon);
1573 
1574 			/* Release task daemon lock. */
1575 			TASK_DAEMON_UNLOCK(ha);
1576 
1577 			/* Wait for task daemon to stop running. */
1578 			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1579 				ql_delay(ha, 10000);
1580 			}
1581 			progress &= ~QL_TASK_DAEMON_STARTED;
1582 		}
1583 
1584 		if (progress & QL_DB_IOBASE_MAPPED) {
1585 			ql_8021_clr_drv_active(ha);
1586 			ddi_regs_map_free(&ha->db_dev_handle);
1587 			progress &= ~QL_DB_IOBASE_MAPPED;
1588 		}
1589 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1590 			ddi_regs_map_free(&ha->iomap_dev_handle);
1591 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1592 		}
1593 
1594 		if (progress & QL_CONFIG_SPACE_SETUP) {
1595 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1596 				ddi_regs_map_free(&ha->sbus_config_handle);
1597 			} else {
1598 				pci_config_teardown(&ha->pci_handle);
1599 			}
1600 			progress &= ~QL_CONFIG_SPACE_SETUP;
1601 		}
1602 
1603 		if (progress & QL_INTR_ADDED) {
1604 			ql_disable_intr(ha);
1605 			ql_release_intr(ha);
1606 			progress &= ~QL_INTR_ADDED;
1607 		}
1608 
1609 		if (progress & QL_MUTEX_CV_INITED) {
1610 			ql_destroy_mutex(ha);
1611 			progress &= ~QL_MUTEX_CV_INITED;
1612 		}
1613 
1614 		if (progress & QL_HBA_BUFFER_SETUP) {
1615 			ql_free_phys(ha, &ha->hba_buf);
1616 			progress &= ~QL_HBA_BUFFER_SETUP;
1617 		}
1618 
1619 		if (progress & QL_REGS_MAPPED) {
1620 			ddi_regs_map_free(&ha->dev_handle);
1621 			if (ha->sbus_fpga_iobase != NULL) {
1622 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1623 			}
1624 			progress &= ~QL_REGS_MAPPED;
1625 		}
1626 
1627 		if (progress & QL_SOFT_STATE_ALLOCED) {
1628 
1629 			ql_fcache_rel(ha->fcache);
1630 
1631 			kmem_free(ha->adapter_stats,
1632 			    sizeof (*ha->adapter_stats));
1633 
1634 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1635 			    QL_UB_LIMIT);
1636 
1637 			kmem_free(ha->outstanding_cmds,
1638 			    sizeof (*ha->outstanding_cmds) *
1639 			    MAX_OUTSTANDING_COMMANDS);
1640 
1641 			if (ha->devpath != NULL) {
1642 				kmem_free(ha->devpath,
1643 				    strlen(ha->devpath) + 1);
1644 			}
1645 
1646 			kmem_free(ha->dev, sizeof (*ha->dev) *
1647 			    DEVICE_HEAD_LIST_SIZE);
1648 
1649 			if (ha->xioctl != NULL) {
1650 				ql_free_xioctl_resource(ha);
1651 			}
1652 
1653 			if (ha->fw_module != NULL) {
1654 				(void) ddi_modclose(ha->fw_module);
1655 			}
1656 			(void) ql_el_trace_desc_dtor(ha);
1657 			(void) ql_nvram_cache_desc_dtor(ha);
1658 
1659 			ddi_soft_state_free(ql_state, instance);
1660 			progress &= ~QL_SOFT_STATE_ALLOCED;
1661 		}
1662 
1663 		ddi_prop_remove_all(dip);
1664 		rval = DDI_FAILURE;
1665 		break;
1666 
1667 	case DDI_RESUME:
1668 		rval = DDI_FAILURE;
1669 
1670 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1671 		if (ha == NULL) {
1672 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1673 			    QL_NAME, instance);
1674 			break;
1675 		}
1676 
1677 		ha->power_level = PM_LEVEL_D3;
1678 		if (ha->pm_capable) {
1679 			/*
1680 			 * Get ql_power to do power on initialization
1681 			 */
1682 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1683 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1684 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1685 				    " power", QL_NAME, instance);
1686 			}
1687 		}
1688 
1689 		/*
1690 		 * There is a bug in DR that prevents PM framework
1691 		 * from calling ql_power.
1692 		 */
1693 		if (ha->power_level == PM_LEVEL_D3) {
1694 			ha->power_level = PM_LEVEL_D0;
1695 
1696 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1697 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1698 				    " adapter", QL_NAME, instance);
1699 			}
1700 
1701 			/* Wake up task_daemon. */
1702 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1703 			    0);
1704 		}
1705 
1706 		/* Acquire global state lock. */
1707 		GLOBAL_STATE_LOCK();
1708 
1709 		/* Restart driver timer. */
1710 		if (ql_timer_timeout_id == NULL) {
1711 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1712 			    ql_timer_ticks);
1713 		}
1714 
1715 		/* Release global state lock. */
1716 		GLOBAL_STATE_UNLOCK();
1717 
1718 		/* Wake up command start routine. */
1719 		ADAPTER_STATE_LOCK(ha);
1720 		ha->flags &= ~ADAPTER_SUSPENDED;
1721 		ADAPTER_STATE_UNLOCK(ha);
1722 
1723 		/*
1724 		 * Transport doesn't make FC discovery in polled
1725 		 * mode; So we need the daemon thread's services
1726 		 * right here.
1727 		 */
1728 		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1729 
1730 		rval = DDI_SUCCESS;
1731 
1732 		/* Restart IP if it was running. */
1733 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1734 			(void) ql_initialize_ip(ha);
1735 			ql_isp_rcvbuf(ha);
1736 		}
1737 		break;
1738 
1739 	default:
1740 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1741 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1742 		rval = DDI_FAILURE;
1743 		break;
1744 	}
1745 
1746 	kmem_free(buf, MAXPATHLEN);
1747 
1748 	if (rval != DDI_SUCCESS) {
1749 		/*EMPTY*/
1750 		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1751 		    ddi_get_instance(dip), rval);
1752 	} else {
1753 		/*EMPTY*/
1754 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1755 	}
1756 
1757 	return (rval);
1758 }
1759 
1760 /*
1761  * ql_detach
1762  *	Used to remove all the states associated with a given
1763  *	instance of a device node prior to the removal of that
1764  *	instance from the system.
1765  *
1766  * Input:
1767  *	dip = pointer to device information structure.
1768  *	cmd = type of detach.
1769  *
1770  * Returns:
1771  *	DDI_SUCCESS or DDI_FAILURE.
1772  *
1773  * Context:
1774  *	Kernel context.
1775  */
1776 static int
1777 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1778 {
1779 	ql_adapter_state_t	*ha, *vha;
1780 	ql_tgt_t		*tq;
1781 	int			delay_cnt;
1782 	uint16_t		index;
1783 	ql_link_t		*link;
1784 	char			*buf;
1785 	timeout_id_t		timer_id = NULL;
1786 	int			suspend, rval = DDI_SUCCESS;
1787 
1788 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1789 	if (ha == NULL) {
1790 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1791 		    ddi_get_instance(dip));
1792 		return (DDI_FAILURE);
1793 	}
1794 
1795 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1796 
1797 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1798 
1799 	switch (cmd) {
1800 	case DDI_DETACH:
1801 		ADAPTER_STATE_LOCK(ha);
1802 		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1803 		ADAPTER_STATE_UNLOCK(ha);
1804 
1805 		TASK_DAEMON_LOCK(ha);
1806 
1807 		if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
1808 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1809 			cv_signal(&ha->cv_task_daemon);
1810 
1811 			TASK_DAEMON_UNLOCK(ha);
1812 
1813 			(void) ql_wait_for_td_stop(ha);
1814 
1815 			TASK_DAEMON_LOCK(ha);
1816 			if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1817 				ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1818 				EL(ha, "failed, could not stop task daemon\n");
1819 			}
1820 		}
1821 		TASK_DAEMON_UNLOCK(ha);
1822 
1823 		GLOBAL_STATE_LOCK();
1824 
1825 		/* Disable driver timer if no adapters. */
1826 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1827 		    ql_hba.last == &ha->hba) {
1828 			timer_id = ql_timer_timeout_id;
1829 			ql_timer_timeout_id = NULL;
1830 		}
1831 		ql_remove_link(&ql_hba, &ha->hba);
1832 
1833 		GLOBAL_STATE_UNLOCK();
1834 
1835 		if (timer_id) {
1836 			(void) untimeout(timer_id);
1837 		}
1838 
1839 		if (ha->pm_capable) {
1840 			if (pm_lower_power(dip, QL_POWER_COMPONENT,
1841 			    PM_LEVEL_D3) != DDI_SUCCESS) {
1842 				cmn_err(CE_WARN, "%s(%d): failed to lower the"
1843 				    " power", QL_NAME, ha->instance);
1844 			}
1845 		}
1846 
1847 		/*
1848 		 * If pm_lower_power shutdown the adapter, there
1849 		 * isn't much else to do
1850 		 */
1851 		if (ha->power_level != PM_LEVEL_D3) {
1852 			ql_halt(ha, PM_LEVEL_D3);
1853 		}
1854 
1855 		/* Remove virtual ports. */
1856 		while ((vha = ha->vp_next) != NULL) {
1857 			ql_vport_destroy(vha);
1858 		}
1859 
1860 		/* Free target queues. */
1861 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1862 			link = ha->dev[index].first;
1863 			while (link != NULL) {
1864 				tq = link->base_address;
1865 				link = link->next;
1866 				ql_dev_free(ha, tq);
1867 			}
1868 		}
1869 
1870 		/*
1871 		 * Free unsolicited buffers.
1872 		 * If we are here then there are no ULPs still
1873 		 * alive that wish to talk to ql so free up
1874 		 * any SRB_IP_UB_UNUSED buffers that are
1875 		 * lingering around
1876 		 */
1877 		QL_UB_LOCK(ha);
1878 		for (index = 0; index < QL_UB_LIMIT; index++) {
1879 			fc_unsol_buf_t *ubp = ha->ub_array[index];
1880 
1881 			if (ubp != NULL) {
1882 				ql_srb_t *sp = ubp->ub_fca_private;
1883 
1884 				sp->flags |= SRB_UB_FREE_REQUESTED;
1885 
1886 				while (!(sp->flags & SRB_UB_IN_FCA) ||
1887 				    (sp->flags & (SRB_UB_CALLBACK |
1888 				    SRB_UB_ACQUIRED))) {
1889 					QL_UB_UNLOCK(ha);
1890 					delay(drv_usectohz(100000));
1891 					QL_UB_LOCK(ha);
1892 				}
1893 				ha->ub_array[index] = NULL;
1894 
1895 				QL_UB_UNLOCK(ha);
1896 				ql_free_unsolicited_buffer(ha, ubp);
1897 				QL_UB_LOCK(ha);
1898 			}
1899 		}
1900 		QL_UB_UNLOCK(ha);
1901 
1902 		/* Free any saved RISC code. */
1903 		if (ha->risc_code != NULL) {
1904 			kmem_free(ha->risc_code, ha->risc_code_size);
1905 			ha->risc_code = NULL;
1906 			ha->risc_code_size = 0;
1907 		}
1908 
1909 		if (ha->fw_module != NULL) {
1910 			(void) ddi_modclose(ha->fw_module);
1911 			ha->fw_module = NULL;
1912 		}
1913 
1914 		/* Free resources. */
1915 		ddi_prop_remove_all(dip);
1916 		(void) fc_fca_detach(dip);
1917 		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1918 		ddi_remove_minor_node(dip, "devctl");
1919 		if (ha->k_stats != NULL) {
1920 			kstat_delete(ha->k_stats);
1921 		}
1922 
1923 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
1924 			ddi_regs_map_free(&ha->sbus_config_handle);
1925 		} else {
1926 			if (CFG_IST(ha, CFG_CTRL_8021)) {
1927 				ql_8021_clr_drv_active(ha);
1928 				ddi_regs_map_free(&ha->db_dev_handle);
1929 			}
1930 			if (ha->iomap_dev_handle != ha->dev_handle) {
1931 				ddi_regs_map_free(&ha->iomap_dev_handle);
1932 			}
1933 			pci_config_teardown(&ha->pci_handle);
1934 		}
1935 
1936 		ql_disable_intr(ha);
1937 		ql_release_intr(ha);
1938 
1939 		ql_free_xioctl_resource(ha);
1940 
1941 		ql_destroy_mutex(ha);
1942 
1943 		ql_free_phys(ha, &ha->hba_buf);
1944 		ql_free_phys(ha, &ha->fwexttracebuf);
1945 		ql_free_phys(ha, &ha->fwfcetracebuf);
1946 
1947 		ddi_regs_map_free(&ha->dev_handle);
1948 		if (ha->sbus_fpga_iobase != NULL) {
1949 			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1950 		}
1951 
1952 		ql_fcache_rel(ha->fcache);
1953 		if (ha->vcache != NULL) {
1954 			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1955 		}
1956 
1957 		if (ha->pi_attrs != NULL) {
1958 			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1959 		}
1960 
1961 		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1962 
1963 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1964 
1965 		kmem_free(ha->outstanding_cmds,
1966 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1967 
1968 		if (ha->n_port != NULL) {
1969 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1970 		}
1971 
1972 		if (ha->devpath != NULL) {
1973 			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1974 		}
1975 
1976 		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1977 
1978 		EL(ha, "detached\n");
1979 
1980 		ddi_soft_state_free(ql_state, (int)ha->instance);
1981 
1982 		break;
1983 
1984 	case DDI_SUSPEND:
1985 		ADAPTER_STATE_LOCK(ha);
1986 
1987 		delay_cnt = 0;
1988 		ha->flags |= ADAPTER_SUSPENDED;
1989 		while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
1990 			ADAPTER_STATE_UNLOCK(ha);
1991 			delay(drv_usectohz(1000000));
1992 			ADAPTER_STATE_LOCK(ha);
1993 		}
1994 		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1995 			ha->flags &= ~ADAPTER_SUSPENDED;
1996 			ADAPTER_STATE_UNLOCK(ha);
1997 			rval = DDI_FAILURE;
1998 			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1999 			    " busy %xh flags %xh", QL_NAME, ha->instance,
2000 			    ha->busy, ha->flags);
2001 			break;
2002 		}
2003 
2004 		ADAPTER_STATE_UNLOCK(ha);
2005 
2006 		if (ha->flags & IP_INITIALIZED) {
2007 			(void) ql_shutdown_ip(ha);
2008 		}
2009 
2010 		if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
2011 			ADAPTER_STATE_LOCK(ha);
2012 			ha->flags &= ~ADAPTER_SUSPENDED;
2013 			ADAPTER_STATE_UNLOCK(ha);
2014 			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
2015 			    QL_NAME, ha->instance, suspend);
2016 
2017 			/* Restart IP if it was running. */
2018 			if (ha->flags & IP_ENABLED &&
2019 			    !(ha->flags & IP_INITIALIZED)) {
2020 				(void) ql_initialize_ip(ha);
2021 				ql_isp_rcvbuf(ha);
2022 			}
2023 			rval = DDI_FAILURE;
2024 			break;
2025 		}
2026 
2027 		/* Acquire global state lock. */
2028 		GLOBAL_STATE_LOCK();
2029 
2030 		/* Disable driver timer if last adapter. */
2031 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2032 		    ql_hba.last == &ha->hba) {
2033 			timer_id = ql_timer_timeout_id;
2034 			ql_timer_timeout_id = NULL;
2035 		}
2036 		GLOBAL_STATE_UNLOCK();
2037 
2038 		if (timer_id) {
2039 			(void) untimeout(timer_id);
2040 		}
2041 
2042 		EL(ha, "suspended\n");
2043 
2044 		break;
2045 
2046 	default:
2047 		rval = DDI_FAILURE;
2048 		break;
2049 	}
2050 
2051 	kmem_free(buf, MAXPATHLEN);
2052 
2053 	if (rval != DDI_SUCCESS) {
2054 		if (ha != NULL) {
2055 			EL(ha, "failed, rval = %xh\n", rval);
2056 		} else {
2057 			/*EMPTY*/
2058 			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
2059 			    ddi_get_instance(dip), rval);
2060 		}
2061 	} else {
2062 		/*EMPTY*/
2063 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
2064 	}
2065 
2066 	return (rval);
2067 }
2068 
2069 
2070 /*
2071  * ql_power
2072  *	Power a device attached to the system.
2073  *
2074  * Input:
2075  *	dip = pointer to device information structure.
2076  *	component = device.
2077  *	level = power level.
2078  *
2079  * Returns:
2080  *	DDI_SUCCESS or DDI_FAILURE.
2081  *
2082  * Context:
2083  *	Kernel context.
2084  */
2085 /* ARGSUSED */
2086 static int
2087 ql_power(dev_info_t *dip, int component, int level)
2088 {
2089 	int			rval = DDI_FAILURE;
2090 	off_t			csr;
2091 	uint8_t			saved_pm_val;
2092 	ql_adapter_state_t	*ha;
2093 	char			*buf;
2094 	char			*path;
2095 
2096 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2097 	if (ha == NULL || ha->pm_capable == 0) {
2098 		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
2099 		    ddi_get_instance(dip));
2100 		return (rval);
2101 	}
2102 
2103 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2104 
2105 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2106 	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2107 
2108 	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2109 	    level != PM_LEVEL_D3)) {
2110 		EL(ha, "invalid, component=%xh or level=%xh\n",
2111 		    component, level);
2112 		return (rval);
2113 	}
2114 
2115 	GLOBAL_HW_LOCK();
2116 	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2117 	GLOBAL_HW_UNLOCK();
2118 
2119 	(void) snprintf(buf, sizeof (buf),
2120 	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2121 	    ddi_pathname(dip, path));
2122 
2123 	switch (level) {
2124 	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
2125 
2126 		QL_PM_LOCK(ha);
2127 		if (ha->power_level == PM_LEVEL_D0) {
2128 			QL_PM_UNLOCK(ha);
2129 			rval = DDI_SUCCESS;
2130 			break;
2131 		}
2132 
2133 		/*
2134 		 * Enable interrupts now
2135 		 */
2136 		saved_pm_val = ha->power_level;
2137 		ha->power_level = PM_LEVEL_D0;
2138 		QL_PM_UNLOCK(ha);
2139 
2140 		GLOBAL_HW_LOCK();
2141 
2142 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
2143 
2144 		/*
2145 		 * Delay after reset, for chip to recover.
2146 		 * Otherwise causes system PANIC
2147 		 */
2148 		drv_usecwait(200000);
2149 
2150 		GLOBAL_HW_UNLOCK();
2151 
2152 		if (ha->config_saved) {
2153 			ha->config_saved = 0;
2154 			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2155 				QL_PM_LOCK(ha);
2156 				ha->power_level = saved_pm_val;
2157 				QL_PM_UNLOCK(ha);
2158 				cmn_err(CE_WARN, "%s failed to restore "
2159 				    "config regs", buf);
2160 				break;
2161 			}
2162 		}
2163 
2164 		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
2165 			cmn_err(CE_WARN, "%s adapter initialization failed",
2166 			    buf);
2167 		}
2168 
2169 		/* Wake up task_daemon. */
2170 		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2171 		    TASK_DAEMON_SLEEPING_FLG, 0);
2172 
2173 		/* Restart IP if it was running. */
2174 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2175 			(void) ql_initialize_ip(ha);
2176 			ql_isp_rcvbuf(ha);
2177 		}
2178 
2179 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2180 		    ha->instance, QL_NAME);
2181 
2182 		rval = DDI_SUCCESS;
2183 		break;
2184 
2185 	case PM_LEVEL_D3:	/* power down to D3 state - off */
2186 
2187 		QL_PM_LOCK(ha);
2188 
2189 		if (ha->busy || ((ha->task_daemon_flags &
2190 		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
2191 			QL_PM_UNLOCK(ha);
2192 			break;
2193 		}
2194 
2195 		if (ha->power_level == PM_LEVEL_D3) {
2196 			rval = DDI_SUCCESS;
2197 			QL_PM_UNLOCK(ha);
2198 			break;
2199 		}
2200 		QL_PM_UNLOCK(ha);
2201 
2202 		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2203 			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2204 			    " config regs", QL_NAME, ha->instance, buf);
2205 			break;
2206 		}
2207 		ha->config_saved = 1;
2208 
2209 		/*
2210 		 * Don't enable interrupts. Running mailbox commands with
2211 		 * interrupts enabled could cause hangs since pm_run_scan()
2212 		 * runs out of a callout thread and on single cpu systems
2213 		 * cv_reltimedwait_sig(), called from ql_mailbox_command(),
2214 		 * would not get to run.
2215 		 */
2216 		TASK_DAEMON_LOCK(ha);
2217 		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2218 		TASK_DAEMON_UNLOCK(ha);
2219 
2220 		ql_halt(ha, PM_LEVEL_D3);
2221 
2222 		/*
2223 		 * Setup ql_intr to ignore interrupts from here on.
2224 		 */
2225 		QL_PM_LOCK(ha);
2226 		ha->power_level = PM_LEVEL_D3;
2227 		QL_PM_UNLOCK(ha);
2228 
2229 		/*
2230 		 * Wait for ISR to complete.
2231 		 */
2232 		INTR_LOCK(ha);
2233 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2234 		INTR_UNLOCK(ha);
2235 
2236 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2237 		    ha->instance, QL_NAME);
2238 
2239 		rval = DDI_SUCCESS;
2240 		break;
2241 	}
2242 
2243 	kmem_free(buf, MAXPATHLEN);
2244 	kmem_free(path, MAXPATHLEN);
2245 
2246 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2247 
2248 	return (rval);
2249 }
2250 
2251 /*
2252  * ql_quiesce
2253  *	quiesce a device attached to the system.
2254  *
2255  * Input:
2256  *	dip = pointer to device information structure.
2257  *
2258  * Returns:
2259  *	DDI_SUCCESS
2260  *
2261  * Context:
2262  *	Kernel context.
2263  */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;
	uint32_t		stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/* 8021-type adapters have a firmware stop helper. */
		(void) ql_stop_firmware(ha);
	} else if (CFG_IST(ha, CFG_CTRL_242581)) {
		/*
		 * Issue a MBC_STOP_FIRMWARE mailbox command directly via
		 * the I/O registers (no interrupts in quiesce context):
		 * clear any pending RISC interrupt, write the command to
		 * mailbox 0, then ring the host interrupt doorbell.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		/* Poll up to ~3s (30000 x 100us) for command completion. */
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			/*
			 * NOTE(review): BIT_15 appears to flag a pending
			 * RISC-to-host interrupt and a low-byte status
			 * below 0x12 a mailbox completion — confirm
			 * against the ISP24xx register reference.
			 */
			if (stat & BIT_15) {
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
		    MWB_4096_BYTES);
		drv_usecwait(100);

	} else {
		/* Older ISPs: quiesce by direct register sequence. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	/* Make sure no further interrupts are delivered. */
	ql_disable_intr(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
2323 
2324 /* ************************************************************************ */
2325 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2326 /* ************************************************************************ */
2327 
2328 /*
2329  * ql_bind_port
2330  *	Handling port binding. The FC Transport attempts to bind an FCA port
2331  *	when it is ready to start transactions on the port. The FC Transport
2332  *	will call the fca_bind_port() function specified in the fca_transport
2333  *	structure it receives. The FCA must fill in the port_info structure
2334  *	passed in the call and also stash the information for future calls.
2335  *
2336  * Input:
2337  *	dip = pointer to FCA information structure.
2338  *	port_info = pointer to port information structure.
2339  *	bind_info = pointer to bind information structure.
2340  *
2341  * Returns:
2342  *	NULL = failure
2343  *
2344  * Context:
2345  *	Kernel context.
2346  */
2347 static opaque_t
2348 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2349     fc_fca_bind_info_t *bind_info)
2350 {
2351 	ql_adapter_state_t	*ha, *vha;
2352 	opaque_t		fca_handle = NULL;
2353 	port_id_t		d_id;
2354 	int			port_npiv = bind_info->port_npiv;
2355 	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2356 	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2357 
2358 	/* get state info based on the dip */
2359 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2360 	if (ha == NULL) {
2361 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2362 		    ddi_get_instance(dip));
2363 		return (NULL);
2364 	}
2365 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2366 
2367 	/* Verify port number is supported. */
2368 	if (port_npiv != 0) {
2369 		if (!(ha->flags & VP_ENABLED)) {
2370 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2371 			    ha->instance);
2372 			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2373 			return (NULL);
2374 		}
2375 		if (!(ha->flags & POINT_TO_POINT)) {
2376 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2377 			    ha->instance);
2378 			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2379 			return (NULL);
2380 		}
2381 		if (!(ha->flags & FDISC_ENABLED)) {
2382 			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2383 			    "FDISC\n", ha->instance);
2384 			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2385 			return (NULL);
2386 		}
2387 		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2388 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2389 			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2390 			    "FC_OUTOFBOUNDS\n", ha->instance);
2391 			port_info->pi_error = FC_OUTOFBOUNDS;
2392 			return (NULL);
2393 		}
2394 	} else if (bind_info->port_num != 0) {
2395 		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2396 		    "supported\n", ha->instance, bind_info->port_num);
2397 		port_info->pi_error = FC_OUTOFBOUNDS;
2398 		return (NULL);
2399 	}
2400 
2401 	/* Locate port context. */
2402 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2403 		if (vha->vp_index == bind_info->port_num) {
2404 			break;
2405 		}
2406 	}
2407 
2408 	/* If virtual port does not exist. */
2409 	if (vha == NULL) {
2410 		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2411 	}
2412 
2413 	/* make sure this port isn't already bound */
2414 	if (vha->flags & FCA_BOUND) {
2415 		port_info->pi_error = FC_ALREADY;
2416 	} else {
2417 		if (vha->vp_index != 0) {
2418 			bcopy(port_nwwn,
2419 			    vha->loginparams.node_ww_name.raw_wwn, 8);
2420 			bcopy(port_pwwn,
2421 			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2422 		}
2423 		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2424 			if (ql_vport_enable(vha) != QL_SUCCESS) {
2425 				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2426 				    "virtual port=%d\n", ha->instance,
2427 				    vha->vp_index);
2428 				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2429 				return (NULL);
2430 			}
2431 			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2432 			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2433 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2434 			    QL_NAME, ha->instance, vha->vp_index,
2435 			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2436 			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2437 			    port_pwwn[6], port_pwwn[7],
2438 			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2439 			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2440 			    port_nwwn[6], port_nwwn[7]);
2441 		}
2442 
2443 		/* stash the bind_info supplied by the FC Transport */
2444 		vha->bind_info.port_handle = bind_info->port_handle;
2445 		vha->bind_info.port_statec_cb =
2446 		    bind_info->port_statec_cb;
2447 		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2448 
2449 		/* Set port's source ID. */
2450 		port_info->pi_s_id.port_id = vha->d_id.b24;
2451 
2452 		/* copy out the default login parameters */
2453 		bcopy((void *)&vha->loginparams,
2454 		    (void *)&port_info->pi_login_params,
2455 		    sizeof (la_els_logi_t));
2456 
2457 		/* Set port's hard address if enabled. */
2458 		port_info->pi_hard_addr.hard_addr = 0;
2459 		if (bind_info->port_num == 0) {
2460 			d_id.b24 = ha->d_id.b24;
2461 			if (CFG_IST(ha, CFG_CTRL_24258081)) {
2462 				if (ha->init_ctrl_blk.cb24.
2463 				    firmware_options_1[0] & BIT_0) {
2464 					d_id.b.al_pa = ql_index_to_alpa[ha->
2465 					    init_ctrl_blk.cb24.
2466 					    hard_address[0]];
2467 					port_info->pi_hard_addr.hard_addr =
2468 					    d_id.b24;
2469 				}
2470 			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2471 			    BIT_0) {
2472 				d_id.b.al_pa = ql_index_to_alpa[ha->
2473 				    init_ctrl_blk.cb.hard_address[0]];
2474 				port_info->pi_hard_addr.hard_addr = d_id.b24;
2475 			}
2476 
2477 			/* Set the node id data */
2478 			if (ql_get_rnid_params(ha,
2479 			    sizeof (port_info->pi_rnid_params.params),
2480 			    (caddr_t)&port_info->pi_rnid_params.params) ==
2481 			    QL_SUCCESS) {
2482 				port_info->pi_rnid_params.status = FC_SUCCESS;
2483 			} else {
2484 				port_info->pi_rnid_params.status = FC_FAILURE;
2485 			}
2486 
2487 			/* Populate T11 FC-HBA details */
2488 			ql_populate_hba_fru_details(ha, port_info);
2489 			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2490 			    KM_SLEEP);
2491 			if (ha->pi_attrs != NULL) {
2492 				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2493 				    sizeof (fca_port_attrs_t));
2494 			}
2495 		} else {
2496 			port_info->pi_rnid_params.status = FC_FAILURE;
2497 			if (ha->pi_attrs != NULL) {
2498 				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2499 				    sizeof (fca_port_attrs_t));
2500 			}
2501 		}
2502 
2503 		/* Generate handle for this FCA. */
2504 		fca_handle = (opaque_t)vha;
2505 
2506 		ADAPTER_STATE_LOCK(ha);
2507 		vha->flags |= FCA_BOUND;
2508 		ADAPTER_STATE_UNLOCK(ha);
2509 		/* Set port's current state. */
2510 		port_info->pi_port_state = vha->state;
2511 	}
2512 
2513 	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2514 	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2515 	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2516 
2517 	return (fca_handle);
2518 }
2519 
2520 /*
2521  * ql_unbind_port
2522  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2523  *
2524  * Input:
2525  *	fca_handle = handle setup by ql_bind_port().
2526  *
2527  * Context:
2528  *	Kernel context.
2529  */
2530 static void
2531 ql_unbind_port(opaque_t fca_handle)
2532 {
2533 	ql_adapter_state_t	*ha;
2534 	ql_tgt_t		*tq;
2535 	uint32_t		flgs;
2536 
2537 	ha = ql_fca_handle_to_state(fca_handle);
2538 	if (ha == NULL) {
2539 		/*EMPTY*/
2540 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2541 		    (void *)fca_handle);
2542 	} else {
2543 		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2544 		    ha->vp_index);
2545 
2546 		if (!(ha->flags & FCA_BOUND)) {
2547 			/*EMPTY*/
2548 			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2549 			    ha->instance, ha->vp_index);
2550 		} else {
2551 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2552 				if ((tq = ql_loop_id_to_queue(ha,
2553 				    FL_PORT_24XX_HDL)) != NULL) {
2554 					(void) ql_logout_fabric_port(ha, tq);
2555 				}
2556 				(void) ql_vport_control(ha, (uint8_t)
2557 				    (CFG_IST(ha, CFG_CTRL_2425) ?
2558 				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2559 				flgs = FCA_BOUND | VP_ENABLED;
2560 			} else {
2561 				flgs = FCA_BOUND;
2562 			}
2563 			ADAPTER_STATE_LOCK(ha);
2564 			ha->flags &= ~flgs;
2565 			ADAPTER_STATE_UNLOCK(ha);
2566 		}
2567 
2568 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2569 		    ha->vp_index);
2570 	}
2571 }
2572 
2573 /*
2574  * ql_init_pkt
2575  *	Initialize FCA portion of packet.
2576  *
2577  * Input:
2578  *	fca_handle = handle setup by ql_bind_port().
2579  *	pkt = pointer to fc_packet.
2580  *
2581  * Returns:
2582  *	FC_SUCCESS - the packet has successfully been initialized.
2583  *	FC_UNBOUND - the fca_handle specified is not bound.
2584  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2585  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2586  *
2587  * Context:
2588  *	Kernel context.
2589  */
2590 /* ARGSUSED */
2591 static int
2592 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2593 {
2594 	ql_adapter_state_t	*ha;
2595 	ql_srb_t		*sp;
2596 	int			rval = FC_SUCCESS;
2597 
2598 	ha = ql_fca_handle_to_state(fca_handle);
2599 	if (ha == NULL) {
2600 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2601 		    (void *)fca_handle);
2602 		return (FC_UNBOUND);
2603 	}
2604 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2605 
2606 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2607 	sp->flags = 0;
2608 
2609 	/* init cmd links */
2610 	sp->cmd.base_address = sp;
2611 	sp->cmd.prev = NULL;
2612 	sp->cmd.next = NULL;
2613 	sp->cmd.head = NULL;
2614 
2615 	/* init watchdog links */
2616 	sp->wdg.base_address = sp;
2617 	sp->wdg.prev = NULL;
2618 	sp->wdg.next = NULL;
2619 	sp->wdg.head = NULL;
2620 	sp->pkt = pkt;
2621 	sp->ha = ha;
2622 	sp->magic_number = QL_FCA_BRAND;
2623 	sp->sg_dma.dma_handle = NULL;
2624 #ifndef __sparc
2625 	if (CFG_IST(ha, CFG_CTRL_8021)) {
2626 		/* Setup DMA for scatter gather list. */
2627 		sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2628 		sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2629 		sp->sg_dma.cookie_count = 1;
2630 		sp->sg_dma.alignment = 64;
2631 		if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2632 			rval = FC_NOMEM;
2633 		}
2634 	}
2635 #endif	/* __sparc */
2636 
2637 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2638 
2639 	return (rval);
2640 }
2641 
2642 /*
2643  * ql_un_init_pkt
2644  *	Release all local resources bound to packet.
2645  *
2646  * Input:
2647  *	fca_handle = handle setup by ql_bind_port().
2648  *	pkt = pointer to fc_packet.
2649  *
2650  * Returns:
2651  *	FC_SUCCESS - the packet has successfully been invalidated.
2652  *	FC_UNBOUND - the fca_handle specified is not bound.
2653  *	FC_BADPACKET - the packet has not been initialized or has
2654  *			already been freed by this FCA.
2655  *
2656  * Context:
2657  *	Kernel context.
2658  */
2659 static int
2660 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2661 {
2662 	ql_adapter_state_t *ha;
2663 	int rval;
2664 	ql_srb_t *sp;
2665 
2666 	ha = ql_fca_handle_to_state(fca_handle);
2667 	if (ha == NULL) {
2668 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2669 		    (void *)fca_handle);
2670 		return (FC_UNBOUND);
2671 	}
2672 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2673 
2674 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2675 
2676 	if (sp->magic_number != QL_FCA_BRAND) {
2677 		EL(ha, "failed, FC_BADPACKET\n");
2678 		rval = FC_BADPACKET;
2679 	} else {
2680 		sp->magic_number = NULL;
2681 		ql_free_phys(ha, &sp->sg_dma);
2682 		rval = FC_SUCCESS;
2683 	}
2684 
2685 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2686 
2687 	return (rval);
2688 }
2689 
2690 /*
2691  * ql_els_send
2692  *	Issue a extended link service request.
2693  *
2694  * Input:
2695  *	fca_handle = handle setup by ql_bind_port().
2696  *	pkt = pointer to fc_packet.
2697  *
2698  * Returns:
2699  *	FC_SUCCESS - the command was successful.
2700  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2701  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2702  *	FC_TRANSPORT_ERROR - a transport error occurred.
2703  *	FC_UNBOUND - the fca_handle specified is not bound.
2704  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2705  *
2706  * Context:
2707  *	Kernel context.
2708  */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer = drv_usectohz(30000000);	/* 30s */
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the task daemon to wake us when suspension ends. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Setup response header. */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	/* Response travels in the reverse direction of the command. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear all other request-type flags, then mark this SRB as ELS. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch to the handler for the requested ELS code. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/*
		 * RJT/ACC need no processing; complete successfully.
		 * NOTE(review): "LA_ELS_RJT" is logged for the ACC case
		 * as well.
		 */
		EL(ha, "LA_ELS_RJT\n");
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
2887 
2888 /*
2889  * ql_get_cap
2890  *	Export FCA hardware and software capabilities.
2891  *
2892  * Input:
2893  *	fca_handle = handle setup by ql_bind_port().
2894  *	cap = pointer to the capabilities string.
2895  *	ptr = buffer pointer for return capability.
2896  *
2897  * Returns:
2898  *	FC_CAP_ERROR - no such capability
2899  *	FC_CAP_FOUND - the capability was returned and cannot be set
2900  *	FC_CAP_SETTABLE - the capability was returned and can be set
2901  *	FC_UNBOUND - the fca_handle specified is not bound.
2902  *
2903  * Context:
2904  *	Kernel context.
2905  */
static int
ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
{
	ql_adapter_state_t	*ha;
	int			rval;
	uint32_t		*rptr = (uint32_t *)ptr;

	/* Validate the handle and recover our per-port soft state. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (strcmp(cap, FC_NODE_WWN) == 0) {
		/* 8-byte world wide node name from the login parameters. */
		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
		    ptr, 8);
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
		/* Entire cached PLOGI payload. */
		bcopy((void *)&ha->loginparams, ptr,
		    sizeof (la_els_logi_t));
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		/* Maximum number of unsolicited buffers this FCA allows. */
		*rptr = (uint32_t)QL_UB_LIMIT;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {

		dev_info_t	*psydip = NULL;
#ifdef __sparc
		/*
		 * Disable streaming for certain 2 chip adapters
		 * below Psycho to handle Psycho byte hole issue.
		 */
		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
			/* Walk up the devinfo tree looking for a Psycho. */
			for (psydip = ddi_get_parent(ha->dip); psydip;
			    psydip = ddi_get_parent(psydip)) {
				if (strcmp(ddi_driver_name(psydip),
				    "pcipsy") == 0) {
					break;
				}
			}
		}
#endif	/* __sparc */

		/* psydip is non-NULL only when a pcipsy ancestor was found. */
		if (psydip) {
			*rptr = (uint32_t)FC_NO_STREAMING;
			EL(ha, "No Streaming\n");
		} else {
			*rptr = (uint32_t)FC_ALLOW_STREAMING;
			EL(ha, "Allow Streaming\n");
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		/*
		 * Max frame payload from whichever init control block
		 * layout (24xx/25xx/81xx vs. legacy) this chip uses.
		 */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb24.max_frame_length[0],
			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
		} else {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb.max_frame_length[0],
			    ha->init_ctrl_blk.cb.max_frame_length[1]);
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		/* After a reset, all outstanding commands are returned. */
		*rptr = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		*rptr = FC_NO_DVMA_SPACE;
		rval = FC_CAP_FOUND;
	} else {
		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
		rval = FC_CAP_ERROR;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
2986 
2987 /*
2988  * ql_set_cap
2989  *	Allow the FC Transport to set FCA capabilities if possible.
2990  *
2991  * Input:
2992  *	fca_handle = handle setup by ql_bind_port().
2993  *	cap = pointer to the capabilities string.
2994  *	ptr = buffer pointer for capability.
2995  *
2996  * Returns:
2997  *	FC_CAP_ERROR - no such capability
2998  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2999  *	FC_CAP_SETTABLE - the capability was successfully set.
3000  *	FC_UNBOUND - the fca_handle specified is not bound.
3001  *
3002  * Context:
3003  *	Kernel context.
3004  */
3005 /* ARGSUSED */
3006 static int
3007 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
3008 {
3009 	ql_adapter_state_t	*ha;
3010 	int			rval;
3011 
3012 	ha = ql_fca_handle_to_state(fca_handle);
3013 	if (ha == NULL) {
3014 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3015 		    (void *)fca_handle);
3016 		return (FC_UNBOUND);
3017 	}
3018 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3019 
3020 	if (strcmp(cap, FC_NODE_WWN) == 0) {
3021 		rval = FC_CAP_FOUND;
3022 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3023 		rval = FC_CAP_FOUND;
3024 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3025 		rval = FC_CAP_FOUND;
3026 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3027 		rval = FC_CAP_FOUND;
3028 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3029 		rval = FC_CAP_FOUND;
3030 	} else {
3031 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3032 		rval = FC_CAP_ERROR;
3033 	}
3034 
3035 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3036 
3037 	return (rval);
3038 }
3039 
3040 /*
3041  * ql_getmap
3042  *	Request of Arbitrated Loop (AL-PA) map.
3043  *
3044  * Input:
3045  *	fca_handle = handle setup by ql_bind_port().
3046  *	mapbuf= buffer pointer for map.
3047  *
3048  * Returns:
3049  *	FC_OLDPORT - the specified port is not operating in loop mode.
3050  *	FC_OFFLINE - the specified port is not online.
3051  *	FC_NOMAP - there is no loop map available for this port.
3052  *	FC_UNBOUND - the fca_handle specified is not bound.
3053  *	FC_SUCCESS - a valid map has been placed in mapbuf.
3054  *
3055  * Context:
3056  *	Kernel context.
3057  */
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer = drv_usectohz(30000000);
	int			rval = FC_SUCCESS;

	/* Validate the handle and recover our per-port soft state. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the resuming path to wake us when suspension ends. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Mailbox command fills lilp_length and the AL_PA list behind it. */
	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Now, since transport drivers consider this as an
		 * offline condition, let's wait for a few seconds
		 * for any loop transitions before we reset the
		 * chip and restart all over again.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NOMAP\n");
		rval = FC_NOMAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n", ha->instance,
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
3126 
3127 /*
3128  * ql_transport
3129  *	Issue an I/O request. Handles all regular requests.
3130  *
3131  * Input:
3132  *	fca_handle = handle setup by ql_bind_port().
3133  *	pkt = pointer to fc_packet.
3134  *
3135  * Returns:
3136  *	FC_SUCCESS - the packet was accepted for transport.
3137  *	FC_TRANSPORT_ERROR - a transport error occurred.
3138  *	FC_BADPACKET - the packet to be transported had not been
3139  *			initialized by this FCA.
3140  *	FC_UNBOUND - the fca_handle specified is not bound.
3141  *
3142  * Context:
3143  *	Kernel context.
3144  */
static int
ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_TRANSPORT_ERROR;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (rval);
	}
	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
#endif

	/* Reset SRB flags. */
	/* Clear all per-command state left over from any previous use. */
	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
	    SRB_MS_PKT | SRB_ELS_PKT);

	/*
	 * Pre-build the response frame header as the mirror image of
	 * the command header (swapped s_id/d_id).
	 */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;

	/* Dispatch on the FC routing control of the command frame. */
	switch (pkt->pkt_cmd_fhdr.r_ctl) {
	case R_CTL_COMMAND:
		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
			sp->flags |= SRB_FCP_CMD_PKT;
			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
		}
		break;

	default:
		/* Setup response header and buffer. */
		if (pkt->pkt_rsplen) {
			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
		}

		switch (pkt->pkt_cmd_fhdr.r_ctl) {
		case R_CTL_UNSOL_DATA:
			/* IP over FC traffic. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
				sp->flags |= SRB_IP_PKT;
				rval = ql_fcp_ip_cmd(ha, pkt, sp);
			}
			break;

		case R_CTL_UNSOL_CONTROL:
			/* Generic services (e.g. name server) passthrough. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
				sp->flags |= SRB_GENERIC_SERVICES_PKT;
				rval = ql_fc_services(ha, pkt);
			}
			break;

		case R_CTL_SOLICITED_DATA:
		case R_CTL_STATUS:
		default:
			/* Unsupported routing control; reject locally. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
			rval = FC_TRANSPORT_ERROR;
			EL(ha, "unknown, r_ctl=%xh\n",
			    pkt->pkt_cmd_fhdr.r_ctl);
			break;
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
3232 
3233 /*
3234  * ql_ub_alloc
3235  *	Allocate buffers for unsolicited exchanges.
3236  *
3237  * Input:
3238  *	fca_handle = handle setup by ql_bind_port().
3239  *	tokens = token array for each buffer.
3240  *	size = size of each buffer.
3241  *	count = pointer to number of buffers.
3242  *	type = the FC-4 type the buffers are reserved for.
3243  *		1 = Extended Link Services, 5 = LLC/SNAP
3244  *
3245  * Returns:
3246  *	FC_FAILURE - buffers could not be allocated.
3247  *	FC_TOOMANY - the FCA could not allocate the requested
3248  *			number of buffers.
3249  *	FC_SUCCESS - unsolicited buffers were allocated.
3250  *	FC_UNBOUND - the fca_handle specified is not bound.
3251  *
3252  * Context:
3253  *	Kernel context.
3254  */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* Refuse allocation while the adapter is powered down. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check the count. */
	/* Requested total must stay within the FCA-wide UB limit. */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/* Allocate all memory needed. */
		/*
		 * NOTE(review): KM_SLEEP allocations sleep until memory is
		 * available rather than returning NULL, so the NULL checks
		 * below appear to be defensive dead code — confirm.
		 */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				if (type == FC_TYPE_IS8802_SNAP) {
					/* IP buffers need DMA-able memory. */
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* Non-IP buffers use plain kmem. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/* Find next available slot. */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* IP buffer. */
	/*
	 * If IP buffers were added (and the chip family supports IP),
	 * enable IP, initialize the IP firmware once, and post the
	 * receive buffers to the ISP.
	 */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				/* Program MTU/buffer size per chip layout. */
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					/* Grow buffer count if needed. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3476 
3477 /*
3478  * ql_ub_free
3479  *	Free unsolicited buffers.
3480  *
3481  * Input:
3482  *	fca_handle = handle setup by ql_bind_port().
3483  *	count = number of buffers.
3484  *	tokens = token array for each buffer.
3485  *
3486  * Returns:
3487  *	FC_SUCCESS - the requested buffers have been freed.
3488  *	FC_UNBOUND - the fca_handle specified is not bound.
3489  *	FC_UB_BADTOKEN - an invalid token was encountered.
3490  *			 No buffers have been released.
3491  *
3492  * Context:
3493  *	Kernel context.
3494  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check all returned tokens. */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Busy-wait (dropping both locks each pass) until the
		 * buffer has been returned to the FCA and is no longer
		 * owned by a callback or the ISP.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		/* Releases the buffer memory and decrements ub_allocated. */
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3576 
3577 /*
3578  * ql_ub_release
3579  *	Release unsolicited buffers from FC Transport
3580  *	to FCA for future use.
3581  *
3582  * Input:
3583  *	fca_handle = handle setup by ql_bind_port().
3584  *	count = number of buffers.
3585  *	tokens = token array for each buffer.
3586  *
3587  * Returns:
3588  *	FC_SUCCESS - the requested buffers have been released.
3589  *	FC_UNBOUND - the fca_handle specified is not bound.
3590  *	FC_UB_BADTOKEN - an invalid token was encountered.
3591  *		No buffers have been released.
3592  *
3593  * Context:
3594  *	Kernel context.
3595  */
static int
ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;
	int			ub_ip_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	QL_UB_LOCK(ha);

	/*
	 * First pass: validate every token before touching any buffer,
	 * so a bad token releases nothing (all-or-nothing semantics).
	 */
	/* Check all returned tokens. */
	for (index = 0; index < count; index++) {
		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		if (ha->ub_array[ub_array_index] == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the state of the unsolicited buffer. */
		/* A buffer already owned by the FCA cannot be released. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		if (sp->flags & SRB_UB_IN_FCA) {
			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
			rval = FC_UB_BADTOKEN;
			break;
		}
	}

	/* If all tokens checkout, release the buffers. */
	if (rval == FC_SUCCESS) {
		/* Check all returned tokens. */
		for (index = 0; index < count; index++) {
			fc_unsol_buf_t	*ubp;

			ub_array_index = tokens[index];
			ubp = ha->ub_array[ub_array_index];
			sp = ubp->ub_fca_private;

			/* Hand ownership back to the FCA. */
			ubp->ub_resp_flags = 0;
			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
			sp->flags |= SRB_UB_IN_FCA;

			/* IP buffer. */
			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
				ub_ip_updated = TRUE;
			}
		}
	}

	QL_UB_UNLOCK(ha);
	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * XXX: We should call ql_isp_rcvbuf() to return a
	 * buffer to ISP only if the number of buffers fall below
	 * the low water mark.
	 */
	if (ub_ip_updated) {
		ql_isp_rcvbuf(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3686 
3687 /*
3688  * ql_abort
3689  *	Abort a packet.
3690  *
3691  * Input:
3692  *	fca_handle = handle setup by ql_bind_port().
3693  *	pkt = pointer to fc_packet.
3694  *	flags = KM_SLEEP flag.
3695  *
3696  * Returns:
3697  *	FC_SUCCESS - the packet has successfully aborted.
3698  *	FC_ABORTED - the packet has successfully aborted.
3699  *	FC_ABORTING - the packet is being aborted.
3700  *	FC_ABORT_FAILED - the packet could not be aborted.
3701  *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3702  *		to abort the packet.
3703  *	FC_BADEXCHANGE - no packet found.
3704  *	FC_UNBOUND - the fca_handle specified is not bound.
3705  *
3706  * Context:
3707  *	Kernel context.
3708  */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No target queue, or the loop is down: nothing can be aborted. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/* Acquire target queue lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Check pending queue for command. */
		/*
		 * Command never reached the ISP; search the driver's
		 * pending queue, then the LUN's device queue, and
		 * complete it locally with CS_ABORTED if found.
		 */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Command already finished; too late to abort it. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Command is in flight with the ISP: invalidate its
		 * request ring entry (if still queued) and issue an
		 * abort mailbox command to the firmware.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3854 
3855 /*
3856  * ql_reset
3857  *	Reset link or hardware.
3858  *
3859  * Input:
3860  *	fca_handle = handle setup by ql_bind_port().
3861  *	cmd = reset type command.
3862  *
3863  * Returns:
3864  *	FC_SUCCESS - reset has successfully finished.
3865  *	FC_UNBOUND - the fca_handle specified is not bound.
3866  *	FC_FAILURE - reset failed.
3867  *
3868  * Context:
3869  *	Kernel context.
3870  */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	switch (cmd) {
	case FC_FCA_CORE:
		/* dump firmware core if specified. */
		/* Only the physical port (vp_index 0) can dump firmware. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Only reset the link when it is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* if dump firmware core if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			/* Virtual ports reset the loop instead of dumping. */
			if (ha->vp_index != 0) {
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/* Free up all unsolicited buffers. */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			/* Ask the transport (via state callback) to free UBs. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the speed bits; state bits are rebuilt below. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		/* Have the task daemon deliver the new state to the port. */
		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3983 
3984 /*
3985  * ql_port_manage
3986  *	Perform port management or diagnostics.
3987  *
3988  * Input:
3989  *	fca_handle = handle setup by ql_bind_port().
3990  *	cmd = pointer to command structure.
3991  *
3992  * Returns:
3993  *	FC_SUCCESS - the request completed successfully.
3994  *	FC_FAILURE - the request did not complete successfully.
3995  *	FC_UNBOUND - the fca_handle specified is not bound.
3996  *
3997  * Context:
3998  *	Kernel context.
3999  */
4000 static int
4001 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
4002 {
4003 	clock_t			timer;
4004 	uint16_t		index;
4005 	uint32_t		*bp;
4006 	port_id_t		d_id;
4007 	ql_link_t		*link;
4008 	ql_adapter_state_t	*ha, *pha;
4009 	ql_tgt_t		*tq;
4010 	dma_mem_t		buffer_xmt, buffer_rcv;
4011 	size_t			length;
4012 	uint32_t		cnt;
4013 	char			buf[80];
4014 	lbp_t			*lb;
4015 	ql_mbx_data_t		mr;
4016 	app_mbx_cmd_t		*mcp;
4017 	int			i0;
4018 	uint8_t			*bptr;
4019 	int			rval2, rval = FC_SUCCESS;
4020 	uint32_t		opcode;
4021 	uint32_t		set_flags = 0;
4022 
4023 	ha = ql_fca_handle_to_state(fca_handle);
4024 	if (ha == NULL) {
4025 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
4026 		    (void *)fca_handle);
4027 		return (FC_UNBOUND);
4028 	}
4029 	pha = ha->pha;
4030 
4031 	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
4032 	    cmd->pm_cmd_code);
4033 
4034 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
4035 
4036 	/*
4037 	 * Wait for all outstanding commands to complete
4038 	 */
4039 	index = (uint16_t)ql_wait_outstanding(ha);
4040 
4041 	if (index != MAX_OUTSTANDING_COMMANDS) {
4042 		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4043 		ql_restart_queues(ha);
4044 		EL(ha, "failed, FC_TRAN_BUSY\n");
4045 		return (FC_TRAN_BUSY);
4046 	}
4047 
4048 	switch (cmd->pm_cmd_code) {
4049 	case FC_PORT_BYPASS:
4050 		d_id.b24 = *cmd->pm_cmd_buf;
4051 		tq = ql_d_id_to_queue(ha, d_id);
4052 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4053 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4054 			rval = FC_FAILURE;
4055 		}
4056 		break;
4057 	case FC_PORT_UNBYPASS:
4058 		d_id.b24 = *cmd->pm_cmd_buf;
4059 		tq = ql_d_id_to_queue(ha, d_id);
4060 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4061 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4062 			rval = FC_FAILURE;
4063 		}
4064 		break;
4065 	case FC_PORT_GET_FW_REV:
4066 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4067 		    pha->fw_minor_version, pha->fw_subminor_version);
4068 		length = strlen(buf) + 1;
4069 		if (cmd->pm_data_len < length) {
4070 			cmd->pm_data_len = length;
4071 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4072 			rval = FC_FAILURE;
4073 		} else {
4074 			(void) strcpy(cmd->pm_data_buf, buf);
4075 		}
4076 		break;
4077 
4078 	case FC_PORT_GET_FCODE_REV: {
4079 		caddr_t		fcode_ver_buf = NULL;
4080 
4081 		i0 = 0;
4082 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4083 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4084 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4085 		    (caddr_t)&fcode_ver_buf, &i0);
4086 		length = (uint_t)i0;
4087 
4088 		if (rval2 != DDI_PROP_SUCCESS) {
4089 			EL(ha, "failed, getting version = %xh\n", rval2);
4090 			length = 20;
4091 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4092 			if (fcode_ver_buf != NULL) {
4093 				(void) sprintf(fcode_ver_buf,
4094 				    "NO FCODE FOUND");
4095 			}
4096 		}
4097 
4098 		if (cmd->pm_data_len < length) {
4099 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4100 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4101 			cmd->pm_data_len = length;
4102 			rval = FC_FAILURE;
4103 		} else if (fcode_ver_buf != NULL) {
4104 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4105 			    length);
4106 		}
4107 
4108 		if (fcode_ver_buf != NULL) {
4109 			kmem_free(fcode_ver_buf, length);
4110 		}
4111 		break;
4112 	}
4113 
4114 	case FC_PORT_GET_DUMP:
4115 		QL_DUMP_LOCK(pha);
4116 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4117 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4118 			    "length=%lxh\n", cmd->pm_data_len);
4119 			cmd->pm_data_len = pha->risc_dump_size;
4120 			rval = FC_FAILURE;
4121 		} else if (pha->ql_dump_state & QL_DUMPING) {
4122 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4123 			rval = FC_TRAN_BUSY;
4124 		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
4125 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4126 			pha->ql_dump_state |= QL_DUMP_UPLOADED;
4127 		} else {
4128 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4129 			rval = FC_FAILURE;
4130 		}
4131 		QL_DUMP_UNLOCK(pha);
4132 		break;
4133 	case FC_PORT_FORCE_DUMP:
4134 		PORTMANAGE_LOCK(ha);
4135 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
4136 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4137 			rval = FC_FAILURE;
4138 		}
4139 		PORTMANAGE_UNLOCK(ha);
4140 		break;
4141 	case FC_PORT_DOWNLOAD_FW:
4142 		PORTMANAGE_LOCK(ha);
4143 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
4144 			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4145 			    (uint32_t)cmd->pm_data_len,
4146 			    ha->flash_fw_addr << 2) != QL_SUCCESS) {
4147 				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
4148 				rval = FC_FAILURE;
4149 			}
4150 			ql_reset_chip(ha);
4151 			set_flags |= ISP_ABORT_NEEDED;
4152 		} else {
4153 			/* Save copy of the firmware. */
4154 			if (pha->risc_code != NULL) {
4155 				kmem_free(pha->risc_code, pha->risc_code_size);
4156 				pha->risc_code = NULL;
4157 				pha->risc_code_size = 0;
4158 			}
4159 
4160 			pha->risc_code = kmem_alloc(cmd->pm_data_len,
4161 			    KM_SLEEP);
4162 			if (pha->risc_code != NULL) {
4163 				pha->risc_code_size =
4164 				    (uint32_t)cmd->pm_data_len;
4165 				bcopy(cmd->pm_data_buf, pha->risc_code,
4166 				    cmd->pm_data_len);
4167 
4168 				/* Do abort to force reload. */
4169 				ql_reset_chip(ha);
4170 				if (ql_abort_isp(ha) != QL_SUCCESS) {
4171 					kmem_free(pha->risc_code,
4172 					    pha->risc_code_size);
4173 					pha->risc_code = NULL;
4174 					pha->risc_code_size = 0;
4175 					ql_reset_chip(ha);
4176 					(void) ql_abort_isp(ha);
4177 					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
4178 					    " FC_FAILURE\n");
4179 					rval = FC_FAILURE;
4180 				}
4181 			}
4182 		}
4183 		PORTMANAGE_UNLOCK(ha);
4184 		break;
4185 	case FC_PORT_GET_DUMP_SIZE:
4186 		bp = (uint32_t *)cmd->pm_data_buf;
4187 		*bp = pha->risc_dump_size;
4188 		break;
4189 	case FC_PORT_DIAG:
4190 		/*
4191 		 * Prevents concurrent diags
4192 		 */
4193 		PORTMANAGE_LOCK(ha);
4194 
4195 		/* Wait for suspension to end. */
4196 		for (timer = 0; timer < 3000 &&
4197 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4198 			ql_delay(ha, 10000);
4199 		}
4200 
4201 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4202 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
4203 			rval = FC_TRAN_BUSY;
4204 			PORTMANAGE_UNLOCK(ha);
4205 			break;
4206 		}
4207 
4208 		switch (cmd->pm_cmd_flags) {
4209 		case QL_DIAG_EXEFMW:
4210 			if (ql_start_firmware(ha) != QL_SUCCESS) {
4211 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4212 				rval = FC_FAILURE;
4213 			}
4214 			break;
4215 		case QL_DIAG_CHKCMDQUE:
4216 			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4217 			    i0++) {
4218 				cnt += (pha->outstanding_cmds[i0] != NULL);
4219 			}
4220 			if (cnt != 0) {
4221 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4222 				    "FC_FAILURE\n");
4223 				rval = FC_FAILURE;
4224 			}
4225 			break;
4226 		case QL_DIAG_FMWCHKSUM:
4227 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
4228 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4229 				    "FC_FAILURE\n");
4230 				rval = FC_FAILURE;
4231 			}
4232 			break;
4233 		case QL_DIAG_SLFTST:
4234 			if (ql_online_selftest(ha) != QL_SUCCESS) {
4235 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4236 				rval = FC_FAILURE;
4237 			}
4238 			ql_reset_chip(ha);
4239 			set_flags |= ISP_ABORT_NEEDED;
4240 			break;
4241 		case QL_DIAG_REVLVL:
4242 			if (cmd->pm_stat_len <
4243 			    sizeof (ql_adapter_revlvl_t)) {
4244 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4245 				    "slen=%lxh, rlvllen=%lxh\n",
4246 				    cmd->pm_stat_len,
4247 				    sizeof (ql_adapter_revlvl_t));
4248 				rval = FC_NOMEM;
4249 			} else {
4250 				bcopy((void *)&(pha->adapter_stats->revlvl),
4251 				    cmd->pm_stat_buf,
4252 				    (size_t)cmd->pm_stat_len);
4253 				cmd->pm_stat_len =
4254 				    sizeof (ql_adapter_revlvl_t);
4255 			}
4256 			break;
4257 		case QL_DIAG_LPBMBX:
4258 
4259 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4260 				EL(ha, "failed, QL_DIAG_LPBMBX "
4261 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4262 				    "reqd=%lxh\n", cmd->pm_data_len,
4263 				    sizeof (struct app_mbx_cmd));
4264 				rval = FC_INVALID_REQUEST;
4265 				break;
4266 			}
4267 			/*
4268 			 * Don't do the wrap test on a 2200 when the
4269 			 * firmware is running.
4270 			 */
4271 			if (!CFG_IST(ha, CFG_CTRL_2200)) {
4272 				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4273 				mr.mb[1] = mcp->mb[1];
4274 				mr.mb[2] = mcp->mb[2];
4275 				mr.mb[3] = mcp->mb[3];
4276 				mr.mb[4] = mcp->mb[4];
4277 				mr.mb[5] = mcp->mb[5];
4278 				mr.mb[6] = mcp->mb[6];
4279 				mr.mb[7] = mcp->mb[7];
4280 
4281 				bcopy(&mr.mb[0], &mr.mb[10],
4282 				    sizeof (uint16_t) * 8);
4283 
4284 				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4285 					EL(ha, "failed, QL_DIAG_LPBMBX "
4286 					    "FC_FAILURE\n");
4287 					rval = FC_FAILURE;
4288 					break;
4289 				} else {
4290 					for (i0 = 1; i0 < 8; i0++) {
4291 						if (mr.mb[i0] !=
4292 						    mr.mb[i0 + 10]) {
4293 							EL(ha, "failed, "
4294 							    "QL_DIAG_LPBMBX "
4295 							    "FC_FAILURE-2\n");
4296 							rval = FC_FAILURE;
4297 							break;
4298 						}
4299 					}
4300 				}
4301 
4302 				if (rval == FC_FAILURE) {
4303 					(void) ql_flash_errlog(ha,
4304 					    FLASH_ERRLOG_ISP_ERR, 0,
4305 					    RD16_IO_REG(ha, hccr),
4306 					    RD16_IO_REG(ha, istatus));
4307 					set_flags |= ISP_ABORT_NEEDED;
4308 				}
4309 			}
4310 			break;
4311 		case QL_DIAG_LPBDTA:
4312 			/*
4313 			 * For loopback data, we receive the
4314 			 * data back in pm_stat_buf. This provides
4315 			 * the user an opportunity to compare the
4316 			 * transmitted and received data.
4317 			 *
4318 			 * NB: lb->options are:
4319 			 *	0 --> Ten bit loopback
4320 			 *	1 --> One bit loopback
4321 			 *	2 --> External loopback
4322 			 */
4323 			if (cmd->pm_data_len > 65536) {
4324 				rval = FC_TOOMANY;
4325 				EL(ha, "failed, QL_DIAG_LPBDTA "
4326 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4327 				break;
4328 			}
4329 			if (ql_get_dma_mem(ha, &buffer_xmt,
4330 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4331 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4332 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4333 				rval = FC_NOMEM;
4334 				break;
4335 			}
4336 			if (ql_get_dma_mem(ha, &buffer_rcv,
4337 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4338 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4339 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4340 				rval = FC_NOMEM;
4341 				break;
4342 			}
4343 			ddi_rep_put8(buffer_xmt.acc_handle,
4344 			    (uint8_t *)cmd->pm_data_buf,
4345 			    (uint8_t *)buffer_xmt.bp,
4346 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4347 
4348 			/* 22xx's adapter must be in loop mode for test. */
4349 			if (CFG_IST(ha, CFG_CTRL_2200)) {
4350 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4351 				if (ha->flags & POINT_TO_POINT ||
4352 				    (ha->task_daemon_flags & LOOP_DOWN &&
4353 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4354 					cnt = *bptr;
4355 					*bptr = (uint8_t)
4356 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4357 					(void) ql_abort_isp(ha);
4358 					*bptr = (uint8_t)cnt;
4359 				}
4360 			}
4361 
4362 			/* Shutdown IP. */
4363 			if (pha->flags & IP_INITIALIZED) {
4364 				(void) ql_shutdown_ip(pha);
4365 			}
4366 
4367 			lb = (lbp_t *)cmd->pm_cmd_buf;
4368 			lb->transfer_count =
4369 			    (uint32_t)cmd->pm_data_len;
4370 			lb->transfer_segment_count = 0;
4371 			lb->receive_segment_count = 0;
4372 			lb->transfer_data_address =
4373 			    buffer_xmt.cookie.dmac_address;
4374 			lb->receive_data_address =
4375 			    buffer_rcv.cookie.dmac_address;
4376 
4377 			if (ql_loop_back(ha, 0, lb,
4378 			    buffer_xmt.cookie.dmac_notused,
4379 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4380 				bzero((void *)cmd->pm_stat_buf,
4381 				    cmd->pm_stat_len);
4382 				ddi_rep_get8(buffer_rcv.acc_handle,
4383 				    (uint8_t *)cmd->pm_stat_buf,
4384 				    (uint8_t *)buffer_rcv.bp,
4385 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4386 				rval = FC_SUCCESS;
4387 			} else {
4388 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4389 				rval = FC_FAILURE;
4390 			}
4391 
4392 			ql_free_phys(ha, &buffer_xmt);
4393 			ql_free_phys(ha, &buffer_rcv);
4394 
4395 			/* Needed to recover the f/w */
4396 			set_flags |= ISP_ABORT_NEEDED;
4397 
4398 			/* Restart IP if it was shutdown. */
4399 			if (pha->flags & IP_ENABLED &&
4400 			    !(pha->flags & IP_INITIALIZED)) {
4401 				(void) ql_initialize_ip(pha);
4402 				ql_isp_rcvbuf(pha);
4403 			}
4404 
4405 			break;
4406 		case QL_DIAG_ECHO: {
4407 			/*
4408 			 * issue an echo command with a user supplied
4409 			 * data pattern and destination address
4410 			 */
4411 			echo_t		echo;		/* temp echo struct */
4412 
4413 			/* Setup echo cmd & adjust for platform */
4414 			opcode = QL_ECHO_CMD;
4415 			BIG_ENDIAN_32(&opcode);
4416 
4417 			/*
4418 			 * due to limitations in the ql
4419 			 * firmaware the echo data field is
4420 			 * limited to 220
4421 			 */
4422 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4423 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4424 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4425 				    "cmdl1=%lxh, statl2=%lxh\n",
4426 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4427 				rval = FC_TOOMANY;
4428 				break;
4429 			}
4430 
4431 			/*
4432 			 * the input data buffer has the user
4433 			 * supplied data pattern.  The "echoed"
4434 			 * data will be DMAed into the output
4435 			 * data buffer.  Therefore the length
4436 			 * of the output buffer must be equal
4437 			 * to or greater then the input buffer
4438 			 * length
4439 			 */
4440 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4441 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4442 				    " cmdl1=%lxh, statl2=%lxh\n",
4443 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4444 				rval = FC_TOOMANY;
4445 				break;
4446 			}
4447 			/* add four bytes for the opcode */
4448 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4449 
4450 			/*
4451 			 * are we 32 or 64 bit addressed???
4452 			 * We need to get the appropriate
4453 			 * DMA and set the command options;
4454 			 * 64 bit (bit 6) or 32 bit
4455 			 * (no bit 6) addressing.
4456 			 * while we are at it lets ask for
4457 			 * real echo (bit 15)
4458 			 */
4459 			echo.options = BIT_15;
4460 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4461 			    !(CFG_IST(ha, CFG_CTRL_8081))) {
4462 				echo.options = (uint16_t)
4463 				    (echo.options | BIT_6);
4464 			}
4465 
4466 			/*
4467 			 * Set up the DMA mappings for the
4468 			 * output and input data buffers.
4469 			 * First the output buffer
4470 			 */
4471 			if (ql_get_dma_mem(ha, &buffer_xmt,
4472 			    (uint32_t)(cmd->pm_data_len + 4),
4473 			    LITTLE_ENDIAN_DMA,
4474 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4475 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4476 				rval = FC_NOMEM;
4477 				break;
4478 			}
4479 			echo.transfer_data_address = buffer_xmt.cookie;
4480 
4481 			/* Next the input buffer */
4482 			if (ql_get_dma_mem(ha, &buffer_rcv,
4483 			    (uint32_t)(cmd->pm_data_len + 4),
4484 			    LITTLE_ENDIAN_DMA,
4485 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4486 				/*
4487 				 * since we could not allocate
4488 				 * DMA space for the input
4489 				 * buffer we need to clean up
4490 				 * by freeing the DMA space
4491 				 * we allocated for the output
4492 				 * buffer
4493 				 */
4494 				ql_free_phys(ha, &buffer_xmt);
4495 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4496 				rval = FC_NOMEM;
4497 				break;
4498 			}
4499 			echo.receive_data_address = buffer_rcv.cookie;
4500 
4501 			/*
4502 			 * copy the 4 byte ECHO op code to the
4503 			 * allocated DMA space
4504 			 */
4505 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4506 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4507 
4508 			/*
4509 			 * copy the user supplied data to the
4510 			 * allocated DMA space
4511 			 */
4512 			ddi_rep_put8(buffer_xmt.acc_handle,
4513 			    (uint8_t *)cmd->pm_cmd_buf,
4514 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4515 			    DDI_DEV_AUTOINCR);
4516 
4517 			/* Shutdown IP. */
4518 			if (pha->flags & IP_INITIALIZED) {
4519 				(void) ql_shutdown_ip(pha);
4520 			}
4521 
4522 			/* send the echo */
4523 			if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4524 				ddi_rep_put8(buffer_rcv.acc_handle,
4525 				    (uint8_t *)buffer_rcv.bp + 4,
4526 				    (uint8_t *)cmd->pm_stat_buf,
4527 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4528 			} else {
4529 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4530 				rval = FC_FAILURE;
4531 			}
4532 
4533 			/* Restart IP if it was shutdown. */
4534 			if (pha->flags & IP_ENABLED &&
4535 			    !(pha->flags & IP_INITIALIZED)) {
4536 				(void) ql_initialize_ip(pha);
4537 				ql_isp_rcvbuf(pha);
4538 			}
4539 			/* free up our DMA buffers */
4540 			ql_free_phys(ha, &buffer_xmt);
4541 			ql_free_phys(ha, &buffer_rcv);
4542 			break;
4543 		}
4544 		default:
4545 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4546 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4547 			rval = FC_INVALID_REQUEST;
4548 			break;
4549 		}
4550 		PORTMANAGE_UNLOCK(ha);
4551 		break;
4552 	case FC_PORT_LINK_STATE:
4553 		/* Check for name equal to null. */
4554 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4555 		    index++) {
4556 			if (cmd->pm_cmd_buf[index] != 0) {
4557 				break;
4558 			}
4559 		}
4560 
4561 		/* If name not null. */
4562 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4563 			/* Locate device queue. */
4564 			tq = NULL;
4565 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4566 			    tq == NULL; index++) {
4567 				for (link = ha->dev[index].first; link != NULL;
4568 				    link = link->next) {
4569 					tq = link->base_address;
4570 
4571 					if (bcmp((void *)&tq->port_name[0],
4572 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4573 						break;
4574 					} else {
4575 						tq = NULL;
4576 					}
4577 				}
4578 			}
4579 
4580 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4581 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4582 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4583 			} else {
4584 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4585 				    FC_STATE_OFFLINE;
4586 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4587 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4588 			}
4589 		} else {
4590 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4591 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4592 		}
4593 		break;
4594 	case FC_PORT_INITIALIZE:
4595 		if (cmd->pm_cmd_len >= 8) {
4596 			tq = NULL;
4597 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4598 			    tq == NULL; index++) {
4599 				for (link = ha->dev[index].first; link != NULL;
4600 				    link = link->next) {
4601 					tq = link->base_address;
4602 
4603 					if (bcmp((void *)&tq->port_name[0],
4604 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4605 						if (!VALID_DEVICE_ID(ha,
4606 						    tq->loop_id)) {
4607 							tq = NULL;
4608 						}
4609 						break;
4610 					} else {
4611 						tq = NULL;
4612 					}
4613 				}
4614 			}
4615 
4616 			if (tq == NULL || ql_target_reset(ha, tq,
4617 			    ha->loop_reset_delay) != QL_SUCCESS) {
4618 				EL(ha, "failed, FC_PORT_INITIALIZE "
4619 				    "FC_FAILURE\n");
4620 				rval = FC_FAILURE;
4621 			}
4622 		} else {
4623 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4624 			    "clen=%lxh\n", cmd->pm_cmd_len);
4625 
4626 			rval = FC_FAILURE;
4627 		}
4628 		break;
4629 	case FC_PORT_RLS:
4630 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4631 			EL(ha, "failed, buffer size passed: %lxh, "
4632 			    "req: %lxh\n", cmd->pm_data_len,
4633 			    (sizeof (fc_rls_acc_t)));
4634 			rval = FC_FAILURE;
4635 		} else if (LOOP_NOT_READY(pha)) {
4636 			EL(ha, "loop NOT ready\n");
4637 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4638 		} else if (ql_get_link_status(ha, ha->loop_id,
4639 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4640 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4641 			rval = FC_FAILURE;
4642 #ifdef _BIG_ENDIAN
4643 		} else {
4644 			fc_rls_acc_t		*rls;
4645 
4646 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4647 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4648 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4649 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4650 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4651 #endif /* _BIG_ENDIAN */
4652 		}
4653 		break;
4654 	case FC_PORT_GET_NODE_ID:
4655 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4656 		    cmd->pm_data_buf) != QL_SUCCESS) {
4657 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4658 			rval = FC_FAILURE;
4659 		}
4660 		break;
4661 	case FC_PORT_SET_NODE_ID:
4662 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4663 		    cmd->pm_data_buf) != QL_SUCCESS) {
4664 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4665 			rval = FC_FAILURE;
4666 		}
4667 		break;
4668 	case FC_PORT_DOWNLOAD_FCODE:
4669 		PORTMANAGE_LOCK(ha);
4670 		if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
4671 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4672 			    (uint32_t)cmd->pm_data_len);
4673 		} else {
4674 			if (cmd->pm_data_buf[0] == 4 &&
4675 			    cmd->pm_data_buf[8] == 0 &&
4676 			    cmd->pm_data_buf[9] == 0x10 &&
4677 			    cmd->pm_data_buf[10] == 0 &&
4678 			    cmd->pm_data_buf[11] == 0) {
4679 				rval = ql_24xx_load_flash(ha,
4680 				    (uint8_t *)cmd->pm_data_buf,
4681 				    (uint32_t)cmd->pm_data_len,
4682 				    ha->flash_fw_addr << 2);
4683 			} else {
4684 				rval = ql_24xx_load_flash(ha,
4685 				    (uint8_t *)cmd->pm_data_buf,
4686 				    (uint32_t)cmd->pm_data_len, 0);
4687 			}
4688 		}
4689 
4690 		if (rval != QL_SUCCESS) {
4691 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4692 			rval = FC_FAILURE;
4693 		} else {
4694 			rval = FC_SUCCESS;
4695 		}
4696 		ql_reset_chip(ha);
4697 		set_flags |= ISP_ABORT_NEEDED;
4698 		PORTMANAGE_UNLOCK(ha);
4699 		break;
4700 	default:
4701 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4702 		rval = FC_BADCMD;
4703 		break;
4704 	}
4705 
4706 	/* Wait for suspension to end. */
4707 	ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4708 	timer = 0;
4709 
4710 	while (timer++ < 3000 &&
4711 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4712 		ql_delay(ha, 10000);
4713 	}
4714 
4715 	ql_restart_queues(ha);
4716 
4717 	if (rval != FC_SUCCESS) {
4718 		EL(ha, "failed, rval = %xh\n", rval);
4719 	} else {
4720 		/*EMPTY*/
4721 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4722 	}
4723 
4724 	return (rval);
4725 }
4726 
4727 static opaque_t
4728 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4729 {
4730 	port_id_t		id;
4731 	ql_adapter_state_t	*ha;
4732 	ql_tgt_t		*tq;
4733 
4734 	id.r.rsvd_1 = 0;
4735 	id.b24 = d_id.port_id;
4736 
4737 	ha = ql_fca_handle_to_state(fca_handle);
4738 	if (ha == NULL) {
4739 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4740 		    (void *)fca_handle);
4741 		return (NULL);
4742 	}
4743 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4744 
4745 	tq = ql_d_id_to_queue(ha, id);
4746 
4747 	if (tq == NULL) {
4748 		EL(ha, "failed, tq=NULL\n");
4749 	} else {
4750 		/*EMPTY*/
4751 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4752 	}
4753 	return (tq);
4754 }
4755 
4756 /* ************************************************************************ */
4757 /*			FCA Driver Local Support Functions.		    */
4758 /* ************************************************************************ */
4759 
4760 /*
4761  * ql_cmd_setup
4762  *	Verifies proper command.
4763  *
4764  * Input:
4765  *	fca_handle = handle setup by ql_bind_port().
4766  *	pkt = pointer to fc_packet.
4767  *	rval = pointer for return value.
4768  *
4769  * Returns:
4770  *	Adapter state pointer, NULL = failure.
4771  *
4772  * Context:
4773  *	Kernel context.
4774  */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	/* Caller-visible residual counts always start at zero. */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Panic / crash-dump I/O bypasses all of the state and DMA
	 * checks below; note *rval is intentionally left unset here.
	 */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Adapter must be online to accept new packets. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For FCP commands, resolve (and cache) the target queue from
	 * the destination ID, then reject if the device is mid-RSCN or
	 * awaiting re-authentication.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			/* Cache the lookup for subsequent packets. */
			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * (Deliberate bitwise '&' of boolean terms to produce a single
	 * branch; all three operands evaluate to 0 or 1.)
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* The SRB must carry this driver's brand to be trusted. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4903 
4904 /*
4905  * ql_els_plogi
4906  *	Issue a extended link service port login request.
4907  *
4908  * Input:
4909  *	ha = adapter state pointer.
4910  *	pkt = pointer to fc_packet.
4911  *
4912  * Returns:
4913  *	FC_SUCCESS - the packet was accepted for transport.
4914  *	FC_TRANSPORT_ERROR - a transport error occurred.
4915  *
4916  * Context:
4917  *	Kernel context.
4918  */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;	/* ACC/RJT payload built locally */
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	/* Reject immediately if the adapter is not in the online state. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology he sends a PLOGI after determining
		 * he has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	/* QL_CONSUMED: the p2p path queued the iocb; nothing more to do. */
	if (ret == QL_CONSUMED) {
		return (ret);
	}

	/* Perform the fabric/port login; retry once on loop-ID clash. */
	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database/ADISC state for the target. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		acc.common_service.rx_bufsize = QL_MAX_FRAME_SIZE(ha);
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		/* Advertise the target's class-3 service parameters. */
		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Target busy with an exchange; convert to RJT. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				/* Schedule iiDMA speed negotiation. */
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			if (CFG_IST(ha, CFG_CTRL_242581)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the firmware status to packet state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5087 
5088 /*
5089  * ql_p2p_plogi
5090  *	Start an extended link service port login request using
5091  *	an ELS Passthru iocb.
5092  *
5093  * Input:
5094  *	ha = adapter state pointer.
5095  *	pkt = pointer to fc_packet.
5096  *
5097  * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
5099  *
5100  * Context:
5101  *	Kernel context.
5102  */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	ql_tgt_t	tmp;
	ql_tgt_t	*tq = &tmp;	/* scratch entry for port db lookups */
	int		rval;
	port_id_t	d_id;
	ql_srb_t	*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Start with a zeroed port id in the scratch entry. */
	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary.  This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh\n", rval);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				/* A login is already in flight; adopt it. */
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle =%xh,"
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
					/*
					 * Stale handle: step past it by two.
					 * The temporary is used to avoid a
					 * lint error on the direct increment.
					 */
					uint16_t *hndl;
					uint16_t val;

					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/* Logged-in target (not initiator): use its handle. */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Push the ELS payload to the device before issuing the IOCB. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Queue the passthru IOCB.  NOTE(review): ql_d_id_to_queue() may
	 * return NULL here; presumably ql_timeout_insert() tolerates a
	 * NULL queue — confirm against its contract.
	 */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);
	ql_timeout_insert(ha, tq, sp);
	ql_start_iocb(ha, sp);

	return (QL_CONSUMED);
}
5184 
5185 
5186 /*
5187  * ql_els_flogi
 *	Issue an extended link service fabric login request.
5189  *
5190  * Input:
5191  *	ha = adapter state pointer.
5192  *	pkt = pointer to fc_packet.
5193  *
5194  * Returns:
5195  *	FC_SUCCESS - the packet was accepted for transport.
5196  *	FC_TRANSPORT_ERROR - a transport error occurred.
5197  *
5198  * Context:
5199  *	Kernel context.
5200  */
5201 static int
5202 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5203 {
5204 	ql_tgt_t		*tq = NULL;
5205 	port_id_t		d_id;
5206 	la_els_logi_t		acc;
5207 	class_svc_param_t	*class3_param;
5208 	int			rval = FC_SUCCESS;
5209 	int			accept = 0;
5210 
5211 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5212 	    pkt->pkt_cmd_fhdr.d_id);
5213 
5214 	bzero(&acc, sizeof (acc));
5215 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5216 
5217 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5218 		/*
5219 		 * d_id of zero in a FLOGI accept response in a point to point
5220 		 * topology triggers evaluation of N Port login initiative.
5221 		 */
5222 		pkt->pkt_resp_fhdr.d_id = 0;
5223 		/*
5224 		 * An N_Port already logged in with the firmware
5225 		 * will have the only database entry.
5226 		 */
5227 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5228 			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5229 		}
5230 
5231 		if (tq != NULL) {
5232 			/*
5233 			 * If the target port has initiative send
5234 			 * up a PLOGI about the new device.
5235 			 */
5236 			if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5237 			    (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5238 			    &ha->init_ctrl_blk.cb24.port_name[0] :
5239 			    &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5240 				ha->send_plogi_timer = 3;
5241 			} else {
5242 				ha->send_plogi_timer = 0;
5243 			}
5244 			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5245 		} else {
5246 			/*
5247 			 * An N_Port not logged in with the firmware will not
5248 			 * have a database entry.  We accept anyway and rely
5249 			 * on a PLOGI from the upper layers to set the d_id
5250 			 * and s_id.
5251 			 */
5252 			accept = 1;
5253 		}
5254 	} else {
5255 		tq = ql_d_id_to_queue(ha, d_id);
5256 	}
5257 	if ((tq != NULL) || (accept != NULL)) {
5258 		/* Build ACC. */
5259 		pkt->pkt_state = FC_PKT_SUCCESS;
5260 		class3_param = (class_svc_param_t *)&acc.class_3;
5261 
5262 		acc.ls_code.ls_code = LA_ELS_ACC;
5263 		acc.common_service.fcph_version = 0x2006;
5264 		if (ha->topology & QL_N_PORT) {
5265 			/* clear F_Port indicator */
5266 			acc.common_service.cmn_features = 0x0800;
5267 		} else {
5268 			acc.common_service.cmn_features = 0x1b00;
5269 		}
5270 		CFG_IST(ha, CFG_CTRL_24258081) ?
5271 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5272 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5273 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5274 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5275 		    ha->init_ctrl_blk.cb.max_frame_length[0],
5276 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5277 		acc.common_service.conc_sequences = 0xff;
5278 		acc.common_service.relative_offset = 0x03;
5279 		acc.common_service.e_d_tov = 0x7d0;
5280 		if (accept) {
5281 			/* Use the saved N_Port WWNN and WWPN */
5282 			if (ha->n_port != NULL) {
5283 				bcopy((void *)&ha->n_port->port_name[0],
5284 				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5285 				bcopy((void *)&ha->n_port->node_name[0],
5286 				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5287 				/* mark service options invalid */
5288 				class3_param->class_valid_svc_opt = 0x0800;
5289 			} else {
5290 				EL(ha, "ha->n_port is NULL\n");
5291 				/* Build RJT. */
5292 				acc.ls_code.ls_code = LA_ELS_RJT;
5293 
5294 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5295 				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5296 			}
5297 		} else {
5298 			bcopy((void *)&tq->port_name[0],
5299 			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5300 			bcopy((void *)&tq->node_name[0],
5301 			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5302 
5303 			class3_param = (class_svc_param_t *)&acc.class_3;
5304 			class3_param->class_valid_svc_opt = 0x8800;
5305 			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5306 			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5307 			class3_param->conc_sequences =
5308 			    tq->class3_conc_sequences;
5309 			class3_param->open_sequences_per_exch =
5310 			    tq->class3_open_sequences_per_exch;
5311 		}
5312 	} else {
5313 		/* Build RJT. */
5314 		acc.ls_code.ls_code = LA_ELS_RJT;
5315 
5316 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5317 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5318 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5319 	}
5320 
5321 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5322 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5323 
5324 	if (rval != FC_SUCCESS) {
5325 		EL(ha, "failed, rval = %xh\n", rval);
5326 	} else {
5327 		/*EMPTY*/
5328 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5329 	}
5330 	return (rval);
5331 }
5332 
5333 /*
5334  * ql_els_logo
 *	Issue an extended link service logout request.
5336  *
5337  * Input:
5338  *	ha = adapter state pointer.
5339  *	pkt = pointer to fc_packet.
5340  *
5341  * Returns:
5342  *	FC_SUCCESS - the packet was accepted for transport.
5343  *	FC_TRANSPORT_ERROR - a transport error occurred.
5344  *
5345  * Context:
5346  *	Kernel context.
5347  */
5348 static int
5349 ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5350 {
5351 	port_id_t	d_id;
5352 	ql_tgt_t	*tq;
5353 	la_els_logo_t	acc;
5354 	int		rval = FC_SUCCESS;
5355 
5356 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5357 	    pkt->pkt_cmd_fhdr.d_id);
5358 
5359 	bzero(&acc, sizeof (acc));
5360 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5361 
5362 	tq = ql_d_id_to_queue(ha, d_id);
5363 	if (tq) {
5364 		DEVICE_QUEUE_LOCK(tq);
5365 		if (tq->d_id.b24 == BROADCAST_ADDR) {
5366 			DEVICE_QUEUE_UNLOCK(tq);
5367 			return (FC_SUCCESS);
5368 		}
5369 
5370 		tq->flags |= TQF_NEED_AUTHENTICATION;
5371 
5372 		do {
5373 			DEVICE_QUEUE_UNLOCK(tq);
5374 			(void) ql_abort_device(ha, tq, 1);
5375 
5376 			/*
5377 			 * Wait for commands to drain in F/W (doesn't
5378 			 * take more than a few milliseconds)
5379 			 */
5380 			ql_delay(ha, 10000);
5381 
5382 			DEVICE_QUEUE_LOCK(tq);
5383 		} while (tq->outcnt);
5384 
5385 		DEVICE_QUEUE_UNLOCK(tq);
5386 	}
5387 
5388 	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
5389 		/* Build ACC. */
5390 		acc.ls_code.ls_code = LA_ELS_ACC;
5391 
5392 		pkt->pkt_state = FC_PKT_SUCCESS;
5393 	} else {
5394 		/* Build RJT. */
5395 		acc.ls_code.ls_code = LA_ELS_RJT;
5396 
5397 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5398 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5399 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5400 	}
5401 
5402 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5403 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5404 
5405 	if (rval != FC_SUCCESS) {
5406 		EL(ha, "failed, rval = %xh\n", rval);
5407 	} else {
5408 		/*EMPTY*/
5409 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5410 	}
5411 	return (rval);
5412 }
5413 
5414 /*
5415  * ql_els_prli
 *	Issue an extended link service process login request.
5417  *
5418  * Input:
5419  *	ha = adapter state pointer.
5420  *	pkt = pointer to fc_packet.
5421  *
5422  * Returns:
5423  *	FC_SUCCESS - the packet was accepted for transport.
5424  *	FC_TRANSPORT_ERROR - a transport error occurred.
5425  *
5426  * Context:
5427  *	Kernel context.
5428  */
5429 static int
5430 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5431 {
5432 	ql_tgt_t		*tq;
5433 	port_id_t		d_id;
5434 	la_els_prli_t		acc;
5435 	prli_svc_param_t	*param;
5436 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
5437 	int			rval = FC_SUCCESS;
5438 
5439 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5440 	    pkt->pkt_cmd_fhdr.d_id);
5441 
5442 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5443 
5444 	tq = ql_d_id_to_queue(ha, d_id);
5445 	if (tq != NULL) {
5446 		(void) ql_get_port_database(ha, tq, PDF_NONE);
5447 
5448 		if ((ha->topology & QL_N_PORT) &&
5449 		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5450 			ql_timeout_insert(ha, tq, sp);
5451 			ql_start_iocb(ha, sp);
5452 			rval = QL_CONSUMED;
5453 		} else {
5454 			/* Build ACC. */
5455 			bzero(&acc, sizeof (acc));
5456 			acc.ls_code = LA_ELS_ACC;
5457 			acc.page_length = 0x10;
5458 			acc.payload_length = tq->prli_payload_length;
5459 
5460 			param = (prli_svc_param_t *)&acc.service_params[0];
5461 			param->type = 0x08;
5462 			param->rsvd = 0x00;
5463 			param->process_assoc_flags = tq->prli_svc_param_word_0;
5464 			param->process_flags = tq->prli_svc_param_word_3;
5465 
5466 			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5467 			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
5468 			    DDI_DEV_AUTOINCR);
5469 
5470 			pkt->pkt_state = FC_PKT_SUCCESS;
5471 		}
5472 	} else {
5473 		la_els_rjt_t rjt;
5474 
5475 		/* Build RJT. */
5476 		bzero(&rjt, sizeof (rjt));
5477 		rjt.ls_code.ls_code = LA_ELS_RJT;
5478 
5479 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5480 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5481 
5482 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5483 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5484 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5485 	}
5486 
5487 	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5488 		EL(ha, "failed, rval = %xh\n", rval);
5489 	} else {
5490 		/*EMPTY*/
5491 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5492 	}
5493 	return (rval);
5494 }
5495 
5496 /*
5497  * ql_els_prlo
 *	Issue an extended link service process logout request.
5499  *
5500  * Input:
5501  *	ha = adapter state pointer.
5502  *	pkt = pointer to fc_packet.
5503  *
5504  * Returns:
5505  *	FC_SUCCESS - the packet was accepted for transport.
5506  *	FC_TRANSPORT_ERROR - a transport error occurred.
5507  *
5508  * Context:
5509  *	Kernel context.
5510  */
5511 /* ARGSUSED */
5512 static int
5513 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5514 {
5515 	la_els_prli_t	acc;
5516 	int		rval = FC_SUCCESS;
5517 
5518 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5519 	    pkt->pkt_cmd_fhdr.d_id);
5520 
5521 	/* Build ACC. */
5522 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5523 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5524 
5525 	acc.ls_code = LA_ELS_ACC;
5526 	acc.service_params[2] = 1;
5527 
5528 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5529 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5530 
5531 	pkt->pkt_state = FC_PKT_SUCCESS;
5532 
5533 	if (rval != FC_SUCCESS) {
5534 		EL(ha, "failed, rval = %xh\n", rval);
5535 	} else {
5536 		/*EMPTY*/
5537 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5538 	}
5539 	return (rval);
5540 }
5541 
5542 /*
5543  * ql_els_adisc
 *	Issue an extended link service address discovery request.
5545  *
5546  * Input:
5547  *	ha = adapter state pointer.
5548  *	pkt = pointer to fc_packet.
5549  *
5550  * Returns:
5551  *	FC_SUCCESS - the packet was accepted for transport.
5552  *	FC_TRANSPORT_ERROR - a transport error occurred.
5553  *
5554  * Context:
5555  *	Kernel context.
5556  */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	/* Find the device queue whose port id matches the destination. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * Device is known but has no valid loop ID: fetch the firmware's
	 * current ID list and try to recover the loop ID from it.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the number of list entries. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Still no valid loop ID: the ADISC cannot be sent. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/*
			 * Restart queued commands on each LUN.
			 * NOTE(review): ql_next() appears to release the
			 * device queue lock, hence the re-acquire below —
			 * confirm against ql_next()'s locking contract.
			 */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	/* rval stays FC_SUCCESS; errors go back via pkt_state/pkt_reason. */
	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5674 
5675 /*
5676  * ql_els_linit
 *	Issue an extended link service loop initialize request.
5678  *
5679  * Input:
5680  *	ha = adapter state pointer.
5681  *	pkt = pointer to fc_packet.
5682  *
5683  * Returns:
5684  *	FC_SUCCESS - the packet was accepted for transport.
5685  *	FC_TRANSPORT_ERROR - a transport error occurred.
5686  *
5687  * Context:
5688  *	Kernel context.
5689  */
5690 static int
5691 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5692 {
5693 	ddi_dma_cookie_t	*cp;
5694 	uint32_t		cnt;
5695 	conv_num_t		n;
5696 	port_id_t		d_id;
5697 	int			rval = FC_SUCCESS;
5698 
5699 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5700 
5701 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5702 	if (ha->topology & QL_SNS_CONNECTION) {
5703 		fc_linit_req_t els;
5704 		lfa_cmd_t lfa;
5705 
5706 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5707 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5708 
5709 		/* Setup LFA mailbox command data. */
5710 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5711 
5712 		lfa.resp_buffer_length[0] = 4;
5713 
5714 		cp = pkt->pkt_resp_cookie;
5715 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5716 			n.size64 = (uint64_t)cp->dmac_laddress;
5717 			LITTLE_ENDIAN_64(&n.size64);
5718 		} else {
5719 			n.size32[0] = LSD(cp->dmac_laddress);
5720 			LITTLE_ENDIAN_32(&n.size32[0]);
5721 			n.size32[1] = MSD(cp->dmac_laddress);
5722 			LITTLE_ENDIAN_32(&n.size32[1]);
5723 		}
5724 
5725 		/* Set buffer address. */
5726 		for (cnt = 0; cnt < 8; cnt++) {
5727 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5728 		}
5729 
5730 		lfa.subcommand_length[0] = 4;
5731 		n.size32[0] = d_id.b24;
5732 		LITTLE_ENDIAN_32(&n.size32[0]);
5733 		lfa.addr[0] = n.size8[0];
5734 		lfa.addr[1] = n.size8[1];
5735 		lfa.addr[2] = n.size8[2];
5736 		lfa.subcommand[1] = 0x70;
5737 		lfa.payload[2] = els.func;
5738 		lfa.payload[4] = els.lip_b3;
5739 		lfa.payload[5] = els.lip_b4;
5740 
5741 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5742 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5743 		} else {
5744 			pkt->pkt_state = FC_PKT_SUCCESS;
5745 		}
5746 	} else {
5747 		fc_linit_resp_t rjt;
5748 
5749 		/* Build RJT. */
5750 		bzero(&rjt, sizeof (rjt));
5751 		rjt.ls_code.ls_code = LA_ELS_RJT;
5752 
5753 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5754 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5755 
5756 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5757 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5758 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5759 	}
5760 
5761 	if (rval != FC_SUCCESS) {
5762 		EL(ha, "failed, rval = %xh\n", rval);
5763 	} else {
5764 		/*EMPTY*/
5765 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5766 	}
5767 	return (rval);
5768 }
5769 
5770 /*
5771  * ql_els_lpc
 *	Issue an extended link service loop control request.
5773  *
5774  * Input:
5775  *	ha = adapter state pointer.
5776  *	pkt = pointer to fc_packet.
5777  *
5778  * Returns:
5779  *	FC_SUCCESS - the packet was accepted for transport.
5780  *	FC_TRANSPORT_ERROR - a transport error occurred.
5781  *
5782  * Context:
5783  *	Kernel context.
5784  */
5785 static int
5786 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5787 {
5788 	ddi_dma_cookie_t	*cp;
5789 	uint32_t		cnt;
5790 	conv_num_t		n;
5791 	port_id_t		d_id;
5792 	int			rval = FC_SUCCESS;
5793 
5794 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5795 
5796 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5797 	if (ha->topology & QL_SNS_CONNECTION) {
5798 		ql_lpc_t els;
5799 		lfa_cmd_t lfa;
5800 
5801 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5802 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5803 
5804 		/* Setup LFA mailbox command data. */
5805 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5806 
5807 		lfa.resp_buffer_length[0] = 4;
5808 
5809 		cp = pkt->pkt_resp_cookie;
5810 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5811 			n.size64 = (uint64_t)(cp->dmac_laddress);
5812 			LITTLE_ENDIAN_64(&n.size64);
5813 		} else {
5814 			n.size32[0] = cp->dmac_address;
5815 			LITTLE_ENDIAN_32(&n.size32[0]);
5816 			n.size32[1] = 0;
5817 		}
5818 
5819 		/* Set buffer address. */
5820 		for (cnt = 0; cnt < 8; cnt++) {
5821 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5822 		}
5823 
5824 		lfa.subcommand_length[0] = 20;
5825 		n.size32[0] = d_id.b24;
5826 		LITTLE_ENDIAN_32(&n.size32[0]);
5827 		lfa.addr[0] = n.size8[0];
5828 		lfa.addr[1] = n.size8[1];
5829 		lfa.addr[2] = n.size8[2];
5830 		lfa.subcommand[1] = 0x71;
5831 		lfa.payload[4] = els.port_control;
5832 		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
5833 
5834 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5835 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5836 		} else {
5837 			pkt->pkt_state = FC_PKT_SUCCESS;
5838 		}
5839 	} else {
5840 		ql_lpc_resp_t rjt;
5841 
5842 		/* Build RJT. */
5843 		bzero(&rjt, sizeof (rjt));
5844 		rjt.ls_code.ls_code = LA_ELS_RJT;
5845 
5846 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5847 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5848 
5849 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5850 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5851 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5852 	}
5853 
5854 	if (rval != FC_SUCCESS) {
5855 		EL(ha, "failed, rval = %xh\n", rval);
5856 	} else {
5857 		/*EMPTY*/
5858 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5859 	}
5860 	return (rval);
5861 }
5862 
5863 /*
5864  * ql_els_lsts
 *	Issue an extended link service loop status request.
5866  *
5867  * Input:
5868  *	ha = adapter state pointer.
5869  *	pkt = pointer to fc_packet.
5870  *
5871  * Returns:
5872  *	FC_SUCCESS - the packet was accepted for transport.
5873  *	FC_TRANSPORT_ERROR - a transport error occurred.
5874  *
5875  * Context:
5876  *	Kernel context.
5877  */
5878 static int
5879 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5880 {
5881 	ddi_dma_cookie_t	*cp;
5882 	uint32_t		cnt;
5883 	conv_num_t		n;
5884 	port_id_t		d_id;
5885 	int			rval = FC_SUCCESS;
5886 
5887 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5888 
5889 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5890 	if (ha->topology & QL_SNS_CONNECTION) {
5891 		fc_lsts_req_t els;
5892 		lfa_cmd_t lfa;
5893 
5894 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5895 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5896 
5897 		/* Setup LFA mailbox command data. */
5898 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5899 
5900 		lfa.resp_buffer_length[0] = 84;
5901 
5902 		cp = pkt->pkt_resp_cookie;
5903 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5904 			n.size64 = cp->dmac_laddress;
5905 			LITTLE_ENDIAN_64(&n.size64);
5906 		} else {
5907 			n.size32[0] = cp->dmac_address;
5908 			LITTLE_ENDIAN_32(&n.size32[0]);
5909 			n.size32[1] = 0;
5910 		}
5911 
5912 		/* Set buffer address. */
5913 		for (cnt = 0; cnt < 8; cnt++) {
5914 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5915 		}
5916 
5917 		lfa.subcommand_length[0] = 2;
5918 		n.size32[0] = d_id.b24;
5919 		LITTLE_ENDIAN_32(&n.size32[0]);
5920 		lfa.addr[0] = n.size8[0];
5921 		lfa.addr[1] = n.size8[1];
5922 		lfa.addr[2] = n.size8[2];
5923 		lfa.subcommand[1] = 0x72;
5924 
5925 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5926 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5927 		} else {
5928 			pkt->pkt_state = FC_PKT_SUCCESS;
5929 		}
5930 	} else {
5931 		fc_lsts_resp_t rjt;
5932 
5933 		/* Build RJT. */
5934 		bzero(&rjt, sizeof (rjt));
5935 		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5936 
5937 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5938 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5939 
5940 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5941 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5942 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5943 	}
5944 
5945 	if (rval != FC_SUCCESS) {
5946 		EL(ha, "failed=%xh\n", rval);
5947 	} else {
5948 		/*EMPTY*/
5949 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5950 	}
5951 	return (rval);
5952 }
5953 
5954 /*
5955  * ql_els_scr
 *	Issue an extended link service state change registration request.
5957  *
5958  * Input:
5959  *	ha = adapter state pointer.
5960  *	pkt = pointer to fc_packet.
5961  *
5962  * Returns:
5963  *	FC_SUCCESS - the packet was accepted for transport.
5964  *	FC_TRANSPORT_ERROR - a transport error occurred.
5965  *
5966  * Context:
5967  *	Kernel context.
5968  */
5969 static int
5970 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5971 {
5972 	fc_scr_resp_t	acc;
5973 	int		rval = FC_SUCCESS;
5974 
5975 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5976 
5977 	bzero(&acc, sizeof (acc));
5978 	if (ha->topology & QL_SNS_CONNECTION) {
5979 		fc_scr_req_t els;
5980 
5981 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5982 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5983 
5984 		if (ql_send_change_request(ha, els.scr_func) ==
5985 		    QL_SUCCESS) {
5986 			/* Build ACC. */
5987 			acc.scr_acc = LA_ELS_ACC;
5988 
5989 			pkt->pkt_state = FC_PKT_SUCCESS;
5990 		} else {
5991 			/* Build RJT. */
5992 			acc.scr_acc = LA_ELS_RJT;
5993 
5994 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5995 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5996 			EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5997 		}
5998 	} else {
5999 		/* Build RJT. */
6000 		acc.scr_acc = LA_ELS_RJT;
6001 
6002 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
6003 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6004 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6005 	}
6006 
6007 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6008 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6009 
6010 	if (rval != FC_SUCCESS) {
6011 		EL(ha, "failed, rval = %xh\n", rval);
6012 	} else {
6013 		/*EMPTY*/
6014 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6015 	}
6016 	return (rval);
6017 }
6018 
6019 /*
6020  * ql_els_rscn
6021  *	Issue a extended link service register state
6022  *	change notification request.
6023  *
6024  * Input:
6025  *	ha = adapter state pointer.
6026  *	pkt = pointer to fc_packet.
6027  *
6028  * Returns:
6029  *	FC_SUCCESS - the packet was accepted for transport.
6030  *	FC_TRANSPORT_ERROR - a transport error occurred.
6031  *
6032  * Context:
6033  *	Kernel context.
6034  */
6035 static int
6036 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6037 {
6038 	ql_rscn_resp_t	acc;
6039 	int		rval = FC_SUCCESS;
6040 
6041 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6042 
6043 	bzero(&acc, sizeof (acc));
6044 	if (ha->topology & QL_SNS_CONNECTION) {
6045 		/* Build ACC. */
6046 		acc.scr_acc = LA_ELS_ACC;
6047 
6048 		pkt->pkt_state = FC_PKT_SUCCESS;
6049 	} else {
6050 		/* Build RJT. */
6051 		acc.scr_acc = LA_ELS_RJT;
6052 
6053 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
6054 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6055 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6056 	}
6057 
6058 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6059 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6060 
6061 	if (rval != FC_SUCCESS) {
6062 		EL(ha, "failed, rval = %xh\n", rval);
6063 	} else {
6064 		/*EMPTY*/
6065 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6066 	}
6067 	return (rval);
6068 }
6069 
6070 /*
6071  * ql_els_farp_req
6072  *	Issue FC Address Resolution Protocol (FARP)
6073  *	extended link service request.
6074  *
6075  *	Note: not supported.
6076  *
6077  * Input:
6078  *	ha = adapter state pointer.
6079  *	pkt = pointer to fc_packet.
6080  *
6081  * Returns:
6082  *	FC_SUCCESS - the packet was accepted for transport.
6083  *	FC_TRANSPORT_ERROR - a transport error occurred.
6084  *
6085  * Context:
6086  *	Kernel context.
6087  */
6088 static int
6089 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6090 {
6091 	ql_acc_rjt_t	acc;
6092 	int		rval = FC_SUCCESS;
6093 
6094 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6095 
6096 	bzero(&acc, sizeof (acc));
6097 
6098 	/* Build ACC. */
6099 	acc.ls_code.ls_code = LA_ELS_ACC;
6100 
6101 	pkt->pkt_state = FC_PKT_SUCCESS;
6102 
6103 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6104 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6105 
6106 	if (rval != FC_SUCCESS) {
6107 		EL(ha, "failed, rval = %xh\n", rval);
6108 	} else {
6109 		/*EMPTY*/
6110 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6111 	}
6112 	return (rval);
6113 }
6114 
6115 /*
6116  * ql_els_farp_reply
6117  *	Issue FC Address Resolution Protocol (FARP)
6118  *	extended link service reply.
6119  *
6120  *	Note: not supported.
6121  *
6122  * Input:
6123  *	ha = adapter state pointer.
6124  *	pkt = pointer to fc_packet.
6125  *
6126  * Returns:
6127  *	FC_SUCCESS - the packet was accepted for transport.
6128  *	FC_TRANSPORT_ERROR - a transport error occurred.
6129  *
6130  * Context:
6131  *	Kernel context.
6132  */
6133 /* ARGSUSED */
6134 static int
6135 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6136 {
6137 	ql_acc_rjt_t	acc;
6138 	int		rval = FC_SUCCESS;
6139 
6140 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6141 
6142 	bzero(&acc, sizeof (acc));
6143 
6144 	/* Build ACC. */
6145 	acc.ls_code.ls_code = LA_ELS_ACC;
6146 
6147 	pkt->pkt_state = FC_PKT_SUCCESS;
6148 
6149 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6150 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6151 
6152 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6153 
6154 	return (rval);
6155 }
6156 
/*
 * ql_els_rnid
 *	Process a Request Node Identification Data (RNID) extended
 *	link service: looks up the target queue for the packet's D_ID,
 *	issues the RNID via ql_send_rnid_els(), and builds an ELS ACC
 *	(or RJT on failure) in the packet's response buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - ACC response built.
 *	FC_FAILURE - no device/login or the RNID command failed;
 *		     RJT response built.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Find the device queue matching the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);

	bzero(&acc, sizeof (acc));

	req = (la_els_rnid_t *)pkt->pkt_cmd;
	/*
	 * Reject when the device is unknown, not logged in, or the
	 * RNID mailbox command fails (short-circuit ordering protects
	 * the tq->loop_id dereferences).
	 */
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	acc.ls_code.ls_code = LA_ELS_ACC;
	/*
	 * NOTE(review): copies req_len bytes into acc.hdr; assumes
	 * la_els_rnid_acc_t reserves FCIO_RNID_MAX_DATA_LEN +
	 * sizeof (fc_rnid_hdr_t) bytes at hdr — confirm in ql_api.h.
	 */
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6220 
/*
 * ql_els_rls
 *	Process a Read Link error Status (RLS) extended link service:
 *	looks up the target queue for the packet's D_ID, retrieves the
 *	link error status block via ql_get_link_status(), and builds an
 *	ELS ACC (or RJT on failure) in the packet's response buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - ACC response built.
 *	FC_FAILURE - no device/login or the mailbox command failed;
 *		     RJT response built.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Find the device queue matching the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);

	bzero(&acc, sizeof (la_els_rls_acc_t));

	/*
	 * Reject when the device is unknown, not logged in, or the
	 * get-link-status mailbox command fails (short-circuit
	 * ordering protects the tq->loop_id dereferences).
	 */
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Byte-swap the counters in place before building the ACC. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6290 
/*
 * ql_busy_plogi
 *	Determine whether a PLOGI can proceed for a device that may
 *	still have commands outstanding.  Waits (bounded) for the
 *	device queue to drain, then scans the task daemon callback
 *	queue for work still pending for the same D_ID.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet (the PLOGI).
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - safe to proceed with the PLOGI.
 *	FC_TRAN_BUSY - outstanding commands or pending callbacks;
 *		       transport should retry the PLOGI later.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock while delaying so I/O can drain. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		/* Unsolicited buffers carry the D_ID in the frame s_id. */
		if (sp->flags & SRB_UB_CALLBACK) {
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6374 
6375 /*
6376  * ql_login_port
6377  *	Logs in a device if not already logged in.
6378  *
6379  * Input:
6380  *	ha = adapter state pointer.
6381  *	d_id = 24 bit port ID.
6382  *	DEVICE_QUEUE_LOCK must be released.
6383  *
6384  * Returns:
6385  *	QL local function return status code.
6386  *
6387  * Context:
6388  *	Kernel context.
6389  */
static int
ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
{
	ql_adapter_state_t	*vha;
	ql_link_t		*link;
	uint16_t		index;
	ql_tgt_t		*tq, *tq2;
	uint16_t		loop_id, first_loop_id, last_loop_id;
	int			rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    d_id.b24);

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Check for device already has a queue. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			/* loop_id is only valid when tq != NULL here. */
			loop_id = tq->loop_id;
			break;
		} else {
			tq = NULL;
		}
	}

	/* Let's stop issuing any IO and unsolicited logo */
	if ((tq != NULL) && (!(ddi_in_panic()))) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
		tq->flags &= ~TQF_RSCN_RCVD;
		DEVICE_QUEUE_UNLOCK(tq);
	}
	/* For a lost local-loop port, retry with its previous loop ID. */
	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
	    !(tq->flags & TQF_FABRIC_DEVICE)) {
		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
	}

	/* Special case for Nameserver */
	if (d_id.b24 == 0xFFFFFC) {
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
		if (tq == NULL) {
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, loop_id);
			ADAPTER_STATE_UNLOCK(ha);
			if (tq == NULL) {
				EL(ha, "failed=%xh, d_id=%xh\n",
				    QL_FUNCTION_FAILED, d_id.b24);
				return (QL_FUNCTION_FAILED);
			}
		}
		if (!(CFG_IST(ha, CFG_CTRL_8021))) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_SUCCESS) {
				tq->loop_id = loop_id;
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha, tq, PDF_NONE);
			}
		} else {
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
		}
	/* Check for device already logged in. */
	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
		if (tq->flags & TQF_FABRIC_DEVICE) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			/* Already-used port ID means login is in effect. */
			if (rval == QL_PORT_ID_USED) {
				rval = QL_SUCCESS;
			}
		} else if (LOCAL_LOOP_ID(loop_id)) {
			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
			    (tq->flags & TQF_INITIATOR_DEVICE ?
			    LLF_NONE : LLF_PLOGI));
			if (rval == QL_SUCCESS) {
				DEVICE_QUEUE_LOCK(tq);
				tq->loop_id = loop_id;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	} else if (ha->topology & QL_SNS_CONNECTION) {
		/* Locate unused loop ID. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			first_loop_id = 0;
			last_loop_id = LAST_N_PORT_HDL;
		} else if (ha->topology & QL_F_PORT) {
			first_loop_id = 0;
			last_loop_id = SNS_LAST_LOOP_ID;
		} else {
			first_loop_id = SNS_FIRST_LOOP_ID;
			last_loop_id = SNS_LAST_LOOP_ID;
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
		if (tq == NULL) {
			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
			    d_id.b24);

			ADAPTER_STATE_UNLOCK(ha);

			return (QL_FUNCTION_FAILED);
		}

		rval = QL_FUNCTION_FAILED;
		loop_id = ha->pha->free_loop_id++;
		/* Try at most one full pass over the usable loop ID range. */
		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
		    index--) {
			/* Wrap the candidate back into [first, last]. */
			if (loop_id < first_loop_id ||
			    loop_id > last_loop_id) {
				loop_id = first_loop_id;
				ha->pha->free_loop_id = (uint16_t)
				    (loop_id + 1);
			}

			/* Bypass if loop ID used. */
			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
				tq2 = ql_loop_id_to_queue(vha, loop_id);
				if (tq2 != NULL && tq2 != tq) {
					break;
				}
			}
			/* vha != NULL means some vport already owns it. */
			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
			    loop_id == ha->loop_id) {
				loop_id = ha->pha->free_loop_id++;
				continue;
			}

			/* Drop the lock around the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_login_fabric_port(ha, tq, loop_id);

			/*
			 * If PORT_ID_USED is returned
			 * the login_fabric_port() updates
			 * with the correct loop ID
			 */
			switch (rval) {
			case QL_PORT_ID_USED:
				/*
				 * use f/w handle and try to
				 * login again.
				 */
				ADAPTER_STATE_LOCK(ha);
				ha->pha->free_loop_id--;
				ADAPTER_STATE_UNLOCK(ha);
				loop_id = tq->loop_id;
				break;

			case QL_SUCCESS:
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha,
				    tq, PDF_NONE);
				/* index = 1 terminates the for loop. */
				index = 1;
				break;

			case QL_LOOP_ID_USED:
				tq->loop_id = PORT_NO_LOOP_ID;
				loop_id = ha->pha->free_loop_id++;
				break;

			case QL_ALL_IDS_IN_USE:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;

			default:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;
			}

			ADAPTER_STATE_LOCK(ha);
		}

		ADAPTER_STATE_UNLOCK(ha);
	} else {
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
	} else {
		EL(ha, "d_id=%xh, loop_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
		    tq->loop_id, tq->port_name[0], tq->port_name[1],
		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
	}
	return (rval);
}
6584 
6585 /*
6586  * ql_login_fabric_port
6587  *	Issue login fabric port mailbox command.
6588  *
6589  * Input:
6590  *	ha:		adapter state pointer.
6591  *	tq:		target queue pointer.
6592  *	loop_id:	FC Loop ID.
6593  *
6594  * Returns:
6595  *	ql local function return status code.
6596  *
6597  * Context:
6598  *	Kernel context.
6599  */
6600 static int
6601 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6602 {
6603 	int		rval;
6604 	int		index;
6605 	int		retry = 0;
6606 	port_id_t	d_id;
6607 	ql_tgt_t	*newq;
6608 	ql_mbx_data_t	mr;
6609 
6610 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6611 	    tq->d_id.b24);
6612 
6613 	/*
6614 	 * QL_PARAMETER_ERROR also means the firmware is
6615 	 * not able to allocate PCB entry due to resource
6616 	 * issues, or collision.
6617 	 */
6618 	do {
6619 		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6620 		if ((rval == QL_PARAMETER_ERROR) ||
6621 		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6622 		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6623 			retry++;
6624 			drv_usecwait(10 * MILLISEC);
6625 		} else {
6626 			break;
6627 		}
6628 	} while (retry < 5);
6629 
6630 	switch (rval) {
6631 	case QL_SUCCESS:
6632 		tq->loop_id = loop_id;
6633 		break;
6634 
6635 	case QL_PORT_ID_USED:
6636 		/*
6637 		 * This Loop ID should NOT be in use in drivers
6638 		 */
6639 		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6640 
6641 		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6642 			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6643 			    "dup loop_id=%xh, d_id=%xh", ha->instance,
6644 			    newq->loop_id, newq->d_id.b24);
6645 			ql_send_logo(ha, newq, NULL);
6646 		}
6647 
6648 		tq->loop_id = mr.mb[1];
6649 		break;
6650 
6651 	case QL_LOOP_ID_USED:
6652 		d_id.b.al_pa = LSB(mr.mb[2]);
6653 		d_id.b.area = MSB(mr.mb[2]);
6654 		d_id.b.domain = LSB(mr.mb[1]);
6655 
6656 		newq = ql_d_id_to_queue(ha, d_id);
6657 		if (newq && (newq->loop_id != loop_id)) {
6658 			/*
6659 			 * This should NEVER ever happen; but this
6660 			 * code is needed to bail out when the worst
6661 			 * case happens - or as used to happen before
6662 			 */
6663 			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6664 			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6665 			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6666 			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6667 			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6668 			    newq->d_id.b24, loop_id);
6669 
6670 			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6671 				ADAPTER_STATE_LOCK(ha);
6672 
6673 				index = ql_alpa_to_index[newq->d_id.b.al_pa];
6674 				ql_add_link_b(&ha->dev[index], &newq->device);
6675 
6676 				newq->d_id.b24 = d_id.b24;
6677 
6678 				index = ql_alpa_to_index[d_id.b.al_pa];
6679 				ql_add_link_b(&ha->dev[index], &newq->device);
6680 
6681 				ADAPTER_STATE_UNLOCK(ha);
6682 			}
6683 
6684 			(void) ql_get_port_database(ha, newq, PDF_NONE);
6685 
6686 		}
6687 
6688 		/*
6689 		 * Invalidate the loop ID for the
6690 		 * us to obtain a new one.
6691 		 */
6692 		tq->loop_id = PORT_NO_LOOP_ID;
6693 		break;
6694 
6695 	case QL_ALL_IDS_IN_USE:
6696 		rval = QL_FUNCTION_FAILED;
6697 		EL(ha, "no loop id's available\n");
6698 		break;
6699 
6700 	default:
6701 		if (rval == QL_COMMAND_ERROR) {
6702 			switch (mr.mb[1]) {
6703 			case 2:
6704 			case 3:
6705 				rval = QL_MEMORY_ALLOC_FAILED;
6706 				break;
6707 
6708 			case 4:
6709 				rval = QL_FUNCTION_TIMEOUT;
6710 				break;
6711 			case 7:
6712 				rval = QL_FABRIC_NOT_INITIALIZED;
6713 				break;
6714 			default:
6715 				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6716 				break;
6717 			}
6718 		} else {
6719 			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6720 			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6721 			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6722 		}
6723 		break;
6724 	}
6725 
6726 	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6727 	    rval != QL_LOOP_ID_USED) {
6728 		EL(ha, "failed=%xh\n", rval);
6729 	} else {
6730 		/*EMPTY*/
6731 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6732 	}
6733 	return (rval);
6734 }
6735 
6736 /*
6737  * ql_logout_port
6738  *	Logs out a device if possible.
6739  *
6740  * Input:
6741  *	ha:	adapter state pointer.
6742  *	d_id:	24 bit port ID.
6743  *
6744  * Returns:
6745  *	QL local function return status code.
6746  *
6747  * Context:
6748  *	Kernel context.
6749  */
6750 static int
6751 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6752 {
6753 	ql_link_t	*link;
6754 	ql_tgt_t	*tq;
6755 	uint16_t	index;
6756 
6757 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6758 
6759 	/* Get head queue index. */
6760 	index = ql_alpa_to_index[d_id.b.al_pa];
6761 
6762 	/* Get device queue. */
6763 	tq = NULL;
6764 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6765 		tq = link->base_address;
6766 		if (tq->d_id.b24 == d_id.b24) {
6767 			break;
6768 		} else {
6769 			tq = NULL;
6770 		}
6771 	}
6772 
6773 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6774 		(void) ql_logout_fabric_port(ha, tq);
6775 		tq->loop_id = PORT_NO_LOOP_ID;
6776 	}
6777 
6778 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6779 
6780 	return (QL_SUCCESS);
6781 }
6782 
6783 /*
6784  * ql_dev_init
6785  *	Initialize/allocate device queue.
6786  *
6787  * Input:
6788  *	ha:		adapter state pointer.
6789  *	d_id:		device destination ID
6790  *	loop_id:	device loop ID
6791  *	ADAPTER_STATE_LOCK must be already obtained.
6792  *
6793  * Returns:
6794  *	NULL = failure
6795  *
6796  * Context:
6797  *	Kernel context.
6798  */
6799 ql_tgt_t *
6800 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6801 {
6802 	ql_link_t	*link;
6803 	uint16_t	index;
6804 	ql_tgt_t	*tq;
6805 
6806 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6807 	    ha->instance, d_id.b24, loop_id);
6808 
6809 	index = ql_alpa_to_index[d_id.b.al_pa];
6810 
6811 	/* If device queue exists, set proper loop ID. */
6812 	tq = NULL;
6813 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6814 		tq = link->base_address;
6815 		if (tq->d_id.b24 == d_id.b24) {
6816 			tq->loop_id = loop_id;
6817 
6818 			/* Reset port down retry count. */
6819 			tq->port_down_retry_count = ha->port_down_retry_count;
6820 			tq->qfull_retry_count = ha->qfull_retry_count;
6821 
6822 			break;
6823 		} else {
6824 			tq = NULL;
6825 		}
6826 	}
6827 
6828 	/* If device does not have queue. */
6829 	if (tq == NULL) {
6830 		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6831 		if (tq != NULL) {
6832 			/*
6833 			 * mutex to protect the device queue,
6834 			 * does not block interrupts.
6835 			 */
6836 			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6837 			    (ha->iflags & IFLG_INTR_AIF) ?
6838 			    (void *)(uintptr_t)ha->intr_pri :
6839 			    (void *)(uintptr_t)ha->iblock_cookie);
6840 
6841 			tq->d_id.b24 = d_id.b24;
6842 			tq->loop_id = loop_id;
6843 			tq->device.base_address = tq;
6844 			tq->iidma_rate = IIDMA_RATE_INIT;
6845 
6846 			/* Reset port down retry count. */
6847 			tq->port_down_retry_count = ha->port_down_retry_count;
6848 			tq->qfull_retry_count = ha->qfull_retry_count;
6849 
6850 			/* Add device to device queue. */
6851 			ql_add_link_b(&ha->dev[index], &tq->device);
6852 		}
6853 	}
6854 
6855 	if (tq == NULL) {
6856 		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6857 	} else {
6858 		/*EMPTY*/
6859 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6860 	}
6861 	return (tq);
6862 }
6863 
6864 /*
6865  * ql_dev_free
6866  *	Remove queue from device list and frees resources used by queue.
6867  *
6868  * Input:
6869  *	ha:	adapter state pointer.
6870  *	tq:	target queue pointer.
6871  *	ADAPTER_STATE_LOCK must be already obtained.
6872  *
6873  * Context:
6874  *	Kernel context.
6875  */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Bail out (without the "done" trace) if any LUN queue still
	 * has commands pending; the queue cannot be freed yet.
	 */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Only free when no commands are outstanding on the target. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				ql_remove_link(&ha->dev[index], link);

				/* Free every LUN queue owned by tq. */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					/* Advance before freeing lq. */
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6919 
6920 /*
6921  * ql_lun_queue
6922  *	Allocate LUN queue if does not exists.
6923  *
6924  * Input:
6925  *	ha:	adapter state pointer.
6926  *	tq:	target queue.
6927  *	lun:	LUN number.
6928  *
6929  * Returns:
6930  *	NULL = failure
6931  *
6932  * Context:
6933  *	Kernel context.
6934  */
6935 static ql_lun_t *
6936 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6937 {
6938 	ql_lun_t	*lq;
6939 	ql_link_t	*link;
6940 
6941 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6942 
6943 	/* Fast path. */
6944 	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6945 		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6946 		return (tq->last_lun_queue);
6947 	}
6948 
6949 	if (lun >= MAX_LUNS) {
6950 		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6951 		return (NULL);
6952 	}
6953 	/* If device queue exists, set proper loop ID. */
6954 	lq = NULL;
6955 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6956 		lq = link->base_address;
6957 		if (lq->lun_no == lun) {
6958 			QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6959 			tq->last_lun_queue = lq;
6960 			return (lq);
6961 		}
6962 	}
6963 
6964 	/* If queue does exist. */
6965 	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6966 
6967 	/* Initialize LUN queue. */
6968 	if (lq != NULL) {
6969 		lq->link.base_address = lq;
6970 
6971 		lq->lun_no = lun;
6972 		lq->target_queue = tq;
6973 
6974 		DEVICE_QUEUE_LOCK(tq);
6975 		ql_add_link_b(&tq->lun_queues, &lq->link);
6976 		DEVICE_QUEUE_UNLOCK(tq);
6977 		tq->last_lun_queue = lq;
6978 	}
6979 
6980 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6981 
6982 	return (lq);
6983 }
6984 
6985 /*
6986  * ql_fcp_scsi_cmd
6987  *	Process fibre channel (FCP) SCSI protocol commands.
6988  *
6989  * Input:
6990  *	ha = adapter state pointer.
6991  *	pkt = pointer to fc_packet.
6992  *	sp = srb pointer.
6993  *
6994  * Returns:
6995  *	FC_SUCCESS - the packet was accepted for transport.
6996  *	FC_TRANSPORT_ERROR - a transport error occurred.
6997  *
6998  * Context:
6999  *	Kernel context.
7000  */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Resolve the target queue, falling back to a D_ID lookup. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/* Extract the LUN from the FCP entity address. */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Normal I/O: account it for ioctl statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			sp->req_cnt = 1;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/* Setup IOCB count. */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
				    (!CFG_IST(ha, CFG_CTRL_8021) ||
				    sp->sg_dma.dma_handle == NULL)) {
					uint32_t	cnt;

					/*
					 * Cookies beyond the command IOCB's
					 * capacity spill into continuation
					 * IOCBs of cmd_cont_segs each.
					 */
					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				}
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* Unknown device: reject locally and complete the packet. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7092 
7093 /*
7094  * ql_task_mgmt
7095  *	Task management function processor.
7096  *
7097  * Input:
7098  *	ha:	adapter state pointer.
7099  *	tq:	target queue pointer.
7100  *	pkt:	pointer to fc_packet.
7101  *	sp:	SRB pointer.
7102  *
7103  * Context:
7104  *	Kernel context.
7105  */
7106 static void
7107 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7108     ql_srb_t *sp)
7109 {
7110 	fcp_rsp_t		*fcpr;
7111 	struct fcp_rsp_info	*rsp;
7112 	uint16_t		lun;
7113 
7114 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7115 
7116 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7117 	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
7118 
7119 	bzero(fcpr, pkt->pkt_rsplen);
7120 
7121 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7122 	fcpr->fcp_response_len = 8;
7123 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7124 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7125 
7126 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7127 		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
7128 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7129 		}
7130 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7131 		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
7132 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7133 		}
7134 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7135 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7136 		    QL_SUCCESS) {
7137 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7138 		}
7139 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7140 		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
7141 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7142 		}
7143 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7144 		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
7145 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7146 		}
7147 	} else {
7148 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7149 	}
7150 
7151 	pkt->pkt_state = FC_PKT_SUCCESS;
7152 
7153 	/* Do command callback. */
7154 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7155 		ql_awaken_task_daemon(ha, sp, 0, 0);
7156 	}
7157 
7158 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7159 }
7160 
7161 /*
7162  * ql_fcp_ip_cmd
7163  *	Process fibre channel (FCP) Internet (IP) protocols commands.
7164  *
7165  * Input:
7166  *	ha:	adapter state pointer.
7167  *	pkt:	pointer to fc_packet.
7168  *	sp:	SRB pointer.
7169  *
7170  * Returns:
7171  *	FC_SUCCESS - the packet was accepted for transport.
7172  *	FC_TRANSPORT_ERROR - a transport error occurred.
7173  *
7174  * Context:
7175  *	Kernel context.
7176  */
static int
ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Resolve the target queue, falling back to a D_ID lookup. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/* IP traffic always uses LUN 0 on the target queue. */
	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
		/*
		 * IP data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
		    0, 0, DDI_DMA_SYNC_FORDEV);

		/* Setup IOCB count. */
		sp->iocb = ha->ip_cmd;
		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
			uint32_t	cnt;

			/*
			 * Cookies beyond the command IOCB's capacity
			 * spill into continuation IOCBs holding
			 * cmd_cont_segs segments each.
			 */
			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
			if (cnt % ha->cmd_cont_segs) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

		return (ql_start_cmd(ha, tq, pkt, sp));
	} else {
		/* Unknown device: reject locally and complete the packet. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7229 
7230 /*
7231  * ql_fc_services
7232  *	Process fibre channel services (name server).
7233  *
7234  * Input:
7235  *	ha:	adapter state pointer.
7236  *	pkt:	pointer to fc_packet.
7237  *
7238  * Returns:
7239  *	FC_SUCCESS - the packet was accepted for transport.
7240  *	FC_TRANSPORT_ERROR - a transport error occurred.
7241  *
7242  * Context:
7243  *	Kernel context.
7244  */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Snapshot the CT header from the command DMA buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/* Do some sanity checks */
	/*
	 * The full CT IU (ct_aiusize presumably in 4-byte words, hence
	 * the * 4 -- confirm against FC-GS) must fit in the response
	 * buffer supplied by the caller.
	 */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			/*
			 * No target/LUN queue for the destination.
			 * rval = QL_SUCCESS deliberately skips the RJT
			 * build below; the packet completes with the
			 * LOCAL_RJT state set here via the callback at
			 * the bottom of this function.
			 */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/* Setup IOCB count. */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			/*
			 * Response cookies beyond the MS IOCB capacity
			 * spill into type-1 continuation IOCBs.
			 */
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {
		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		/* Copy the reject into the response DMA buffer. */
		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7348 
7349 /*
7350  * ql_cthdr_endian
 *	Change endianness of ct passthrough header and payload.
7352  *
7353  * Input:
7354  *	acc_handle:	DMA buffer access handle.
7355  *	ct_hdr:		Pointer to header.
7356  *	restore:	Restore first flag.
7357  *
7358  * Context:
7359  *	Interrupt or Kernel context, no mailbox commands allowed.
7360  */
7361 void
7362 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7363     boolean_t restore)
7364 {
7365 	uint8_t		i, *bp;
7366 	fc_ct_header_t	hdr;
7367 	uint32_t	*hdrp = (uint32_t *)&hdr;
7368 
7369 	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7370 	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7371 
7372 	if (restore) {
7373 		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7374 			*hdrp = BE_32(*hdrp);
7375 			hdrp++;
7376 		}
7377 	}
7378 
7379 	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7380 		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7381 
7382 		switch (hdr.ct_cmdrsp) {
7383 		case NS_GA_NXT:
7384 		case NS_GPN_ID:
7385 		case NS_GNN_ID:
7386 		case NS_GCS_ID:
7387 		case NS_GFT_ID:
7388 		case NS_GSPN_ID:
7389 		case NS_GPT_ID:
7390 		case NS_GID_FT:
7391 		case NS_GID_PT:
7392 		case NS_RPN_ID:
7393 		case NS_RNN_ID:
7394 		case NS_RSPN_ID:
7395 		case NS_DA_ID:
7396 			BIG_ENDIAN_32(bp);
7397 			break;
7398 		case NS_RFT_ID:
7399 		case NS_RCS_ID:
7400 		case NS_RPT_ID:
7401 			BIG_ENDIAN_32(bp);
7402 			bp += 4;
7403 			BIG_ENDIAN_32(bp);
7404 			break;
7405 		case NS_GNN_IP:
7406 		case NS_GIPA_IP:
7407 			BIG_ENDIAN(bp, 16);
7408 			break;
7409 		case NS_RIP_NN:
7410 			bp += 8;
7411 			BIG_ENDIAN(bp, 16);
7412 			break;
7413 		case NS_RIPA_NN:
7414 			bp += 8;
7415 			BIG_ENDIAN_64(bp);
7416 			break;
7417 		default:
7418 			break;
7419 		}
7420 	}
7421 
7422 	if (restore == B_FALSE) {
7423 		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7424 			*hdrp = BE_32(*hdrp);
7425 			hdrp++;
7426 		}
7427 	}
7428 
7429 	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7430 	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7431 }
7432 
7433 /*
7434  * ql_start_cmd
7435  *	Finishes starting fibre channel protocol (FCP) command.
7436  *
7437  * Input:
7438  *	ha:	adapter state pointer.
7439  *	tq:	target queue pointer.
7440  *	pkt:	pointer to fc_packet.
7441  *	sp:	SRB pointer.
7442  *
7443  * Context:
7444  *	Kernel context.
7445  */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;	/* seconds; 0 = interrupt driven */
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Not yet in the outstanding command array. */
	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			/*
			 * Poll two watchdog ticks beyond the watchdog
			 * period, presumably so the watchdog can expire
			 * the command first.
			 */
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
	}

	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			DEVICE_QUEUE_UNLOCK(tq);

			/*
			 * Only the first panic-time command gets to kick
			 * off an ISP abort (lip_on_panic latches).
			 */
			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			/* Bypass the device queue; issue the IOCB directly. */
			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int	res;

			/* Poll timed out; try to abort the command. */
			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				/* Abort failed; pull it off the queue. */
				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		if (ddi_in_panic()) {
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7604 
7605 /*
7606  * ql_poll_cmd
7607  *	Polls commands for completion.
7608  *
7609  * Input:
 *	vha = adapter state pointer.
7611  *	sp = SRB command pointer.
7612  *	poll_wait = poll wait time in seconds.
7613  *
7614  * Returns:
7615  *	QL local function return status code.
7616  *
7617  * Context:
7618  *	Kernel context.
7619  */
7620 static int
7621 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
7622 {
7623 	int			rval = QL_SUCCESS;
7624 	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
7625 	ql_adapter_state_t	*ha = vha->pha;
7626 
7627 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7628 
7629 	while (sp->flags & SRB_POLL) {
7630 
7631 		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
7632 		    ha->idle_timer >= 15 || ddi_in_panic()) {
7633 
7634 			/* If waiting for restart, do it now. */
7635 			if (ha->port_retry_timer != 0) {
7636 				ADAPTER_STATE_LOCK(ha);
7637 				ha->port_retry_timer = 0;
7638 				ADAPTER_STATE_UNLOCK(ha);
7639 
7640 				TASK_DAEMON_LOCK(ha);
7641 				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
7642 				TASK_DAEMON_UNLOCK(ha);
7643 			}
7644 
7645 			if (INTERRUPT_PENDING(ha)) {
7646 				(void) ql_isr((caddr_t)ha);
7647 				INTR_LOCK(ha);
7648 				ha->intr_claimed = TRUE;
7649 				INTR_UNLOCK(ha);
7650 			}
7651 
7652 			/*
7653 			 * Call task thread function in case the
7654 			 * daemon is not running.
7655 			 */
7656 			TASK_DAEMON_LOCK(ha);
7657 
7658 			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
7659 			    QL_TASK_PENDING(ha)) {
7660 				ha->task_daemon_flags |= TASK_THREAD_CALLED;
7661 				ql_task_thread(ha);
7662 				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7663 			}
7664 
7665 			TASK_DAEMON_UNLOCK(ha);
7666 		}
7667 
7668 		if (msecs_left < 10) {
7669 			rval = QL_FUNCTION_TIMEOUT;
7670 			break;
7671 		}
7672 
7673 		/*
7674 		 * Polling interval is 10 milli seconds; Increasing
7675 		 * the polling interval to seconds since disk IO
7676 		 * timeout values are ~60 seconds is tempting enough,
7677 		 * but CPR dump time increases, and so will the crash
7678 		 * dump time; Don't toy with the settings without due
7679 		 * consideration for all the scenarios that will be
7680 		 * impacted.
7681 		 */
7682 		ql_delay(ha, 10000);
7683 		msecs_left -= 10;
7684 	}
7685 
7686 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7687 
7688 	return (rval);
7689 }
7690 
7691 /*
7692  * ql_next
7693  *	Retrieve and process next job in the device queue.
7694  *
7695  * Input:
 *	vha:	adapter state pointer.
7697  *	lq:	LUN queue pointer.
7698  *	DEVICE_QUEUE_LOCK must be already obtained.
7699  *
7700  * Output:
7701  *	Releases DEVICE_QUEUE_LOCK upon exit.
7702  *
7703  * Context:
7704  *	Interrupt or Kernel context, no mailbox commands allowed.
7705  */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	ql_srb_t		*sp;
	ql_link_t		*link;
	ql_tgt_t		*tq = lq->target_queue;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing is dispatched from the queue while in panic. */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
		    ha->instance);
		return;
	}

	/* Drain the LUN queue until empty or a stop condition is hit. */
	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    sp->flags & SRB_ABORT ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Track outstanding commands on the target. */
		tq->outcnt++;

		/* Hand the command to the ISP request queue. */
		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7782 
7783 /*
7784  * ql_done
7785  *	Process completed commands.
7786  *
7787  * Input:
7788  *	link:	first command link in chain.
7789  *
7790  * Context:
7791  *	Interrupt or Kernel context, no mailbox commands allowed.
7792  */
void
ql_done(ql_link_t *link)
{
	ql_adapter_state_t	*ha;
	ql_link_t		*next_link;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;

	QL_PRINT_3(CE_CONT, "started\n");

	for (; link != NULL; link = next_link) {
		/* Save next now; sp may be requeued or completed below. */
		next_link = link->next;
		sp = link->base_address;
		ha = sp->ha;

		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer completion. */
			QL_UB_LOCK(ha);
			if (sp->flags & SRB_UB_IN_ISP) {
				if (ha->ub_outcnt != 0) {
					ha->ub_outcnt--;
				}
				QL_UB_UNLOCK(ha);
				/* Replenish ISP receive buffers. */
				ql_isp_rcvbuf(ha);
				QL_UB_LOCK(ha);
			}
			QL_UB_UNLOCK(ha);
			ql_awaken_task_daemon(ha, sp, 0, 0);
		} else {
			/* Free outstanding command slot. */
			if (sp->handle != 0) {
				ha->outstanding_cmds[
				    sp->handle & OSC_INDEX_MASK] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			}

			/* Acquire device queue lock. */
			lq = sp->lun_queue;
			tq = lq->target_queue;
			DEVICE_QUEUE_LOCK(tq);

			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}

			if (sp->flags & SRB_FCP_CMD_PKT) {
				if (sp->fcp->fcp_cntl.cntl_qtype ==
				    FCP_QTYPE_UNTAGGED) {
					/*
					 * Clear the flag for this LUN so that
					 * untagged commands can be submitted
					 * for it.
					 */
					lq->flags &= ~LQF_UNTAGGED_PENDING;
				}

				if (lq->lun_outcnt != 0) {
					lq->lun_outcnt--;
				}
			}

			/* Reset port down retry count on good completion. */
			if (sp->pkt->pkt_reason == CS_COMPLETE) {
				tq->port_down_retry_count =
				    ha->port_down_retry_count;
				tq->qfull_retry_count = ha->qfull_retry_count;
			}


			/* Alter aborted status for fast timeout feature */
			if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
			    (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				EL(ha, "fast abort modify change\n");
				sp->flags &= ~(SRB_RETRY);
				sp->pkt->pkt_reason = CS_TIMEOUT;
			}

			/* Place request back on top of target command queue */
			/*
			 * Note: when the fast-timeout block above fired, it
			 * cleared SRB_RETRY, so this retry path is skipped.
			 */
			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				sp->flags &= ~(SRB_ISP_STARTED |
				    SRB_ISP_COMPLETED | SRB_RETRY);

				/* Reset watchdog timer */
				sp->wdg_q_time = sp->init_wdg_q_time;

				/* Issue marker command on reset status. */
				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
				    (sp->pkt->pkt_reason == CS_RESET ||
				    (CFG_IST(ha, CFG_CTRL_24258081) &&
				    sp->pkt->pkt_reason == CS_ABORTED))) {
					(void) ql_marker(ha, tq->loop_id, 0,
					    MK_SYNC_ID);
				}

				/* Requeue at the head and kick the queue. */
				ql_add_link_t(&lq->cmd, &sp->cmd);
				sp->flags |= SRB_IN_DEVICE_QUEUE;
				ql_next(ha, lq);
			} else {
				/* Remove command from watchdog queue. */
				if (sp->flags & SRB_WATCHDOG_ENABLED) {
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;
				}

				/*
				 * ql_next() drops DEVICE_QUEUE_LOCK;
				 * otherwise drop it here ourselves.
				 */
				if (lq->cmd.first != NULL) {
					ql_next(ha, lq);
				} else {
					/* Release LU queue specific lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					if (ha->pha->pending_cmds.first !=
					    NULL) {
						ql_start_iocb(ha, NULL);
					}
				}

				/* Sync buffers if required.  */
				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
					(void) ddi_dma_sync(
					    sp->pkt->pkt_resp_dma,
					    0, 0, DDI_DMA_SYNC_FORCPU);
				}

				/* Map ISP completion codes. */
				sp->pkt->pkt_expln = FC_EXPLN_NONE;
				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
				switch (sp->pkt->pkt_reason) {
				case CS_COMPLETE:
					sp->pkt->pkt_state = FC_PKT_SUCCESS;
					break;
				case CS_RESET:
					/* Issue marker command. */
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						(void) ql_marker(ha,
						    tq->loop_id, 0,
						    MK_SYNC_ID);
					}
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_ABORTED;
					break;
				case CS_RESOUCE_UNAVAILABLE:
					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
					sp->pkt->pkt_reason =
					    FC_REASON_PKT_BUSY;
					break;

				case CS_TIMEOUT:
					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				case CS_DATA_OVERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_OVERRUN;
					break;
				case CS_PORT_UNAVAILABLE:
				case CS_PORT_LOGGED_OUT:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_LOGIN_REQUIRED;
					ql_send_logo(ha, tq, NULL);
					break;
				case CS_PORT_CONFIG_CHG:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_OFFLINE;
					break;
				case CS_QUEUE_FULL:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason = FC_REASON_QFULL;
					break;

				case CS_ABORTED:
					DEVICE_QUEUE_LOCK(tq);
					if (tq->flags & (TQF_RSCN_RCVD |
					    TQF_NEED_AUTHENTICATION)) {
						sp->pkt->pkt_state =
						    FC_PKT_PORT_OFFLINE;
						sp->pkt->pkt_reason =
						    FC_REASON_LOGIN_REQUIRED;
					} else {
						sp->pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;
						sp->pkt->pkt_reason =
						    FC_REASON_ABORTED;
					}
					DEVICE_QUEUE_UNLOCK(tq);
					break;

				case CS_TRANSPORT:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_PKT_TRAN_ERROR;
					break;

				case CS_DATA_UNDERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_UNDERRUN;
					break;
				case CS_DMA_ERROR:
				case CS_BAD_PAYLOAD:
				case CS_UNKNOWN:
				case CS_CMD_FAILED:
				default:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				}

				/* Now call the pkt completion callback */
				if (sp->flags & SRB_POLL) {
					/* Poller in ql_poll_cmd() sees this. */
					sp->flags &= ~SRB_POLL;
				} else if (sp->pkt->pkt_comp) {
					if (sp->pkt->pkt_tran_flags &
					    FC_TRAN_IMMEDIATE_CB) {
						(*sp->pkt->pkt_comp)(sp->pkt);
					} else {
						ql_awaken_task_daemon(ha, sp,
						    0, 0);
					}
				}
			}
		}
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
8037 
8038 /*
8039  * ql_awaken_task_daemon
8040  *	Adds command completion callback to callback queue and/or
8041  *	awakens task daemon thread.
8042  *
8043  * Input:
8044  *	ha:		adapter state pointer.
8045  *	sp:		srb pointer.
8046  *	set_flags:	task daemon flags to set.
8047  *	reset_flags:	task daemon flags to reset.
8048  *
8049  * Context:
8050  *	Interrupt or Kernel context, no mailbox commands allowed.
8051  */
void
ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
    uint32_t set_flags, uint32_t reset_flags)
{
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Don't request another ISP abort while one is in progress. */
	if (set_flags & ISP_ABORT_NEEDED) {
		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
			set_flags &= ~ISP_ABORT_NEEDED;
		}
	}

	ha->task_daemon_flags |= set_flags;
	ha->task_daemon_flags &= ~reset_flags;

	if (QL_DAEMON_SUSPENDED(ha)) {
		/*
		 * Daemon can't service the callback queue; run the
		 * completion (or the task thread) inline instead.
		 */
		if (sp != NULL) {
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}
		} else {
			if (!(curthread->t_flag & T_INTR_THREAD) &&
			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}
	} else {
		/* Queue completion for the daemon and wake it if sleeping. */
		if (sp != NULL) {
			ql_add_link_b(&ha->callback_queue, &sp->cmd);
		}

		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
			cv_broadcast(&ha->cv_task_daemon);
		}
		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8105 
8106 /*
8107  * ql_task_daemon
8108  *	Thread that is awaken by the driver when a
8109  *	background needs to be done.
8110  *
8111  * Input:
8112  *	arg = adapter state pointer.
8113  *
8114  * Context:
8115  *	Kernel context.
8116  */
static void
ql_task_daemon(void *arg)
{
	ql_adapter_state_t	*ha = (void *)arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Register with CPR so suspend/resume can park this thread. */
	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
	    "ql_task_daemon");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;

	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		/* Process pending work; returns with the lock still held. */
		ql_task_thread(ha);

		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);

		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
		}

		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;

		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);

		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
	}

	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	/* CALLB_CPR_EXIT releases task_daemon_mutex. */
	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
	CALLB_CPR_EXIT(&ha->cprinfo);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	thread_exit();
}
8174 
8175 /*
8176  * ql_task_thread
8177  *	Thread run by daemon.
8178  *
8179  * Input:
8180  *	ha = adapter state pointer.
8181  *	TASK_DAEMON_LOCK must be acquired prior to call.
8182  *
8183  * Context:
8184  *	Kernel context.
8185  */
8186 static void
8187 ql_task_thread(ql_adapter_state_t *ha)
8188 {
8189 	int			loop_again;
8190 	ql_srb_t		*sp;
8191 	ql_head_t		*head;
8192 	ql_link_t		*link;
8193 	caddr_t			msg;
8194 	ql_adapter_state_t	*vha;
8195 
8196 	do {
8197 		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
8198 		    ha->instance, ha->task_daemon_flags);
8199 
8200 		loop_again = FALSE;
8201 
8202 		QL_PM_LOCK(ha);
8203 		if (ha->power_level != PM_LEVEL_D0) {
8204 			QL_PM_UNLOCK(ha);
8205 			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8206 			break;
8207 		}
8208 		QL_PM_UNLOCK(ha);
8209 
8210 		/* IDC event. */
8211 		if (ha->task_daemon_flags & IDC_EVENT) {
8212 			ha->task_daemon_flags &= ~IDC_EVENT;
8213 			TASK_DAEMON_UNLOCK(ha);
8214 			ql_process_idc_event(ha);
8215 			TASK_DAEMON_LOCK(ha);
8216 			loop_again = TRUE;
8217 		}
8218 
8219 		if (ha->flags & ADAPTER_SUSPENDED || ha->task_daemon_flags &
8220 		    (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
8221 		    (ha->flags & ONLINE) == 0) {
8222 			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8223 			break;
8224 		}
8225 		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8226 
8227 		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8228 			TASK_DAEMON_UNLOCK(ha);
8229 			if (ha->log_parity_pause == B_TRUE) {
8230 				(void) ql_flash_errlog(ha,
8231 				    FLASH_ERRLOG_PARITY_ERR, 0,
8232 				    MSW(ha->parity_stat_err),
8233 				    LSW(ha->parity_stat_err));
8234 				ha->log_parity_pause = B_FALSE;
8235 			}
8236 			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8237 			TASK_DAEMON_LOCK(ha);
8238 			loop_again = TRUE;
8239 		}
8240 
8241 		/* Idle Check. */
8242 		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8243 			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8244 			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8245 				TASK_DAEMON_UNLOCK(ha);
8246 				ql_idle_check(ha);
8247 				TASK_DAEMON_LOCK(ha);
8248 				loop_again = TRUE;
8249 			}
8250 		}
8251 
8252 		/* Crystal+ port#0 bypass transition */
8253 		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8254 			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8255 			TASK_DAEMON_UNLOCK(ha);
8256 			(void) ql_initiate_lip(ha);
8257 			TASK_DAEMON_LOCK(ha);
8258 			loop_again = TRUE;
8259 		}
8260 
8261 		/* Abort queues needed. */
8262 		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8263 			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8264 			TASK_DAEMON_UNLOCK(ha);
8265 			ql_abort_queues(ha);
8266 			TASK_DAEMON_LOCK(ha);
8267 		}
8268 
8269 		/* Not suspended, awaken waiting routines. */
8270 		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8271 		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8272 			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8273 			cv_broadcast(&ha->cv_dr_suspended);
8274 			loop_again = TRUE;
8275 		}
8276 
8277 		/* Handle RSCN changes. */
8278 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
8279 			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8280 				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8281 				TASK_DAEMON_UNLOCK(ha);
8282 				(void) ql_handle_rscn_update(vha);
8283 				TASK_DAEMON_LOCK(ha);
8284 				loop_again = TRUE;
8285 			}
8286 		}
8287 
8288 		/* Handle state changes. */
8289 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
8290 			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8291 			    !(ha->task_daemon_flags &
8292 			    TASK_DAEMON_POWERING_DOWN)) {
8293 				/* Report state change. */
8294 				EL(vha, "state change = %xh\n", vha->state);
8295 				vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8296 
8297 				if (vha->task_daemon_flags &
8298 				    COMMAND_WAIT_NEEDED) {
8299 					vha->task_daemon_flags &=
8300 					    ~COMMAND_WAIT_NEEDED;
8301 					if (!(ha->task_daemon_flags &
8302 					    COMMAND_WAIT_ACTIVE)) {
8303 						ha->task_daemon_flags |=
8304 						    COMMAND_WAIT_ACTIVE;
8305 						TASK_DAEMON_UNLOCK(ha);
8306 						ql_cmd_wait(ha);
8307 						TASK_DAEMON_LOCK(ha);
8308 						ha->task_daemon_flags &=
8309 						    ~COMMAND_WAIT_ACTIVE;
8310 					}
8311 				}
8312 
8313 				msg = NULL;
8314 				if (FC_PORT_STATE_MASK(vha->state) ==
8315 				    FC_STATE_OFFLINE) {
8316 					if (vha->task_daemon_flags &
8317 					    STATE_ONLINE) {
8318 						if (ha->topology &
8319 						    QL_LOOP_CONNECTION) {
8320 							msg = "Loop OFFLINE";
8321 						} else {
8322 							msg = "Link OFFLINE";
8323 						}
8324 					}
8325 					vha->task_daemon_flags &=
8326 					    ~STATE_ONLINE;
8327 				} else if (FC_PORT_STATE_MASK(vha->state) ==
8328 				    FC_STATE_LOOP) {
8329 					if (!(vha->task_daemon_flags &
8330 					    STATE_ONLINE)) {
8331 						msg = "Loop ONLINE";
8332 					}
8333 					vha->task_daemon_flags |= STATE_ONLINE;
8334 				} else if (FC_PORT_STATE_MASK(vha->state) ==
8335 				    FC_STATE_ONLINE) {
8336 					if (!(vha->task_daemon_flags &
8337 					    STATE_ONLINE)) {
8338 						msg = "Link ONLINE";
8339 					}
8340 					vha->task_daemon_flags |= STATE_ONLINE;
8341 				} else {
8342 					msg = "Unknown Link state";
8343 				}
8344 
8345 				if (msg != NULL) {
8346 					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8347 					    "%s", QL_NAME, ha->instance,
8348 					    vha->vp_index, msg);
8349 				}
8350 
8351 				if (vha->flags & FCA_BOUND) {
8352 					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8353 					    "cb state=%xh\n", ha->instance,
8354 					    vha->vp_index, vha->state);
8355 					TASK_DAEMON_UNLOCK(ha);
8356 					(vha->bind_info.port_statec_cb)
8357 					    (vha->bind_info.port_handle,
8358 					    vha->state);
8359 					TASK_DAEMON_LOCK(ha);
8360 				}
8361 				loop_again = TRUE;
8362 			}
8363 		}
8364 
8365 		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8366 		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8367 			EL(ha, "processing LIP reset\n");
8368 			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8369 			TASK_DAEMON_UNLOCK(ha);
8370 			for (vha = ha; vha != NULL; vha = vha->vp_next) {
8371 				if (vha->flags & FCA_BOUND) {
8372 					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8373 					    "cb reset\n", ha->instance,
8374 					    vha->vp_index);
8375 					(vha->bind_info.port_statec_cb)
8376 					    (vha->bind_info.port_handle,
8377 					    FC_STATE_TARGET_PORT_RESET);
8378 				}
8379 			}
8380 			TASK_DAEMON_LOCK(ha);
8381 			loop_again = TRUE;
8382 		}
8383 
8384 		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8385 		    FIRMWARE_UP)) {
8386 			/*
8387 			 * The firmware needs more unsolicited
8388 			 * buffers. We cannot allocate any new
8389 			 * buffers unless the ULP module requests
8390 			 * for new buffers. All we can do here is
8391 			 * to give received buffers from the pool
8392 			 * that is already allocated
8393 			 */
8394 			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8395 			TASK_DAEMON_UNLOCK(ha);
8396 			ql_isp_rcvbuf(ha);
8397 			TASK_DAEMON_LOCK(ha);
8398 			loop_again = TRUE;
8399 		}
8400 
8401 		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8402 			TASK_DAEMON_UNLOCK(ha);
8403 			(void) ql_abort_isp(ha);
8404 			TASK_DAEMON_LOCK(ha);
8405 			loop_again = TRUE;
8406 		}
8407 
8408 		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8409 		    COMMAND_WAIT_NEEDED))) {
8410 			if (QL_IS_SET(ha->task_daemon_flags,
8411 			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8412 				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8413 				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8414 					ha->task_daemon_flags |= RESET_ACTIVE;
8415 					TASK_DAEMON_UNLOCK(ha);
8416 					for (vha = ha; vha != NULL;
8417 					    vha = vha->vp_next) {
8418 						ql_rst_aen(vha);
8419 					}
8420 					TASK_DAEMON_LOCK(ha);
8421 					ha->task_daemon_flags &= ~RESET_ACTIVE;
8422 					loop_again = TRUE;
8423 				}
8424 			}
8425 
8426 			if (QL_IS_SET(ha->task_daemon_flags,
8427 			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8428 				if (!(ha->task_daemon_flags &
8429 				    LOOP_RESYNC_ACTIVE)) {
8430 					ha->task_daemon_flags |=
8431 					    LOOP_RESYNC_ACTIVE;
8432 					TASK_DAEMON_UNLOCK(ha);
8433 					(void) ql_loop_resync(ha);
8434 					TASK_DAEMON_LOCK(ha);
8435 					loop_again = TRUE;
8436 				}
8437 			}
8438 		}
8439 
8440 		/* Port retry needed. */
8441 		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8442 			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8443 			ADAPTER_STATE_LOCK(ha);
8444 			ha->port_retry_timer = 0;
8445 			ADAPTER_STATE_UNLOCK(ha);
8446 
8447 			TASK_DAEMON_UNLOCK(ha);
8448 			ql_restart_queues(ha);
8449 			TASK_DAEMON_LOCK(ha);
8450 			loop_again = B_TRUE;
8451 		}
8452 
8453 		/* iiDMA setting needed? */
8454 		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8455 			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8456 
8457 			TASK_DAEMON_UNLOCK(ha);
8458 			ql_iidma(ha);
8459 			TASK_DAEMON_LOCK(ha);
8460 			loop_again = B_TRUE;
8461 		}
8462 
8463 		if (ha->task_daemon_flags & SEND_PLOGI) {
8464 			ha->task_daemon_flags &= ~SEND_PLOGI;
8465 			TASK_DAEMON_UNLOCK(ha);
8466 			(void) ql_n_port_plogi(ha);
8467 			TASK_DAEMON_LOCK(ha);
8468 		}
8469 
8470 		head = &ha->callback_queue;
8471 		if (head->first != NULL) {
8472 			sp = head->first->base_address;
8473 			link = &sp->cmd;
8474 
8475 			/* Dequeue command. */
8476 			ql_remove_link(head, link);
8477 
8478 			/* Release task daemon lock. */
8479 			TASK_DAEMON_UNLOCK(ha);
8480 
8481 			/* Do callback. */
8482 			if (sp->flags & SRB_UB_CALLBACK) {
8483 				ql_unsol_callback(sp);
8484 			} else {
8485 				(*sp->pkt->pkt_comp)(sp->pkt);
8486 			}
8487 
8488 			/* Acquire task daemon lock. */
8489 			TASK_DAEMON_LOCK(ha);
8490 
8491 			loop_again = TRUE;
8492 		}
8493 
8494 	} while (loop_again);
8495 }
8496 
8497 /*
8498  * ql_idle_check
8499  *	Test for adapter is alive and well.
8500  *
8501  * Input:
8502  *	ha:	adapter state pointer.
8503  *
8504  * Context:
8505  *	Kernel context.
8506  */
8507 static void
8508 ql_idle_check(ql_adapter_state_t *ha)
8509 {
8510 	ddi_devstate_t	state;
8511 	int		rval;
8512 	ql_mbx_data_t	mr;
8513 
8514 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8515 
8516 	/* Firmware Ready Test. */
8517 	rval = ql_get_firmware_state(ha, &mr);
8518 	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8519 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8520 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8521 		state = ddi_get_devstate(ha->dip);
8522 		if (state == DDI_DEVSTATE_UP) {
8523 			/*EMPTY*/
8524 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8525 			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8526 		}
8527 		TASK_DAEMON_LOCK(ha);
8528 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8529 			EL(ha, "fstate_ready, isp_abort_needed\n");
8530 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8531 		}
8532 		TASK_DAEMON_UNLOCK(ha);
8533 	}
8534 
8535 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8536 }
8537 
8538 /*
8539  * ql_unsol_callback
8540  *	Handle unsolicited buffer callbacks.
8541  *
8542  * Input:
8543  *	ha = adapter state pointer.
8544  *	sp = srb pointer.
8545  *
8546  * Context:
8547  *	Kernel context.
8548  */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Recover the unsolicited buffer this srb carries. */
	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	QL_UB_LOCK(ha);
	/*
	 * If the buffer is being reclaimed or the adapter is powering
	 * down, return the buffer to the FCA pool without making any
	 * transport callback.
	 */
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		/* Affected-address entry follows the 4-byte RSCN header. */
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/* Re-queue the srb so the post is retried later. */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* Defer the LOGO post while the device drains its I/O. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* Sync DMA memory before the transport inspects an IP frame. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Hand the unsolicited buffer up to the FC transport. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8650 
8651 /*
8652  * ql_send_logo
8653  *
8654  * Input:
8655  *	ha:	adapter state pointer.
8656  *	tq:	target queue pointer.
8657  *	done_q:	done queue pointer.
8658  *
8659  * Context:
8660  *	Interrupt or Kernel context, no mailbox commands allowed.
8661  */
8662 void
8663 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
8664 {
8665 	fc_unsol_buf_t		*ubp;
8666 	ql_srb_t		*sp;
8667 	la_els_logo_t		*payload;
8668 	ql_adapter_state_t	*ha = vha->pha;
8669 
8670 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
8671 	    tq->d_id.b24);
8672 
8673 	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
8674 		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
8675 		return;
8676 	}
8677 
8678 	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
8679 	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
8680 
8681 		/* Locate a buffer to use. */
8682 		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
8683 		if (ubp == NULL) {
8684 			EL(vha, "Failed, get_unsolicited_buffer\n");
8685 			return;
8686 		}
8687 
8688 		DEVICE_QUEUE_LOCK(tq);
8689 		tq->flags |= TQF_NEED_AUTHENTICATION;
8690 		tq->logout_sent++;
8691 		DEVICE_QUEUE_UNLOCK(tq);
8692 
8693 		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);
8694 
8695 		sp = ubp->ub_fca_private;
8696 
8697 		/* Set header. */
8698 		ubp->ub_frame.d_id = vha->d_id.b24;
8699 		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8700 		ubp->ub_frame.s_id = tq->d_id.b24;
8701 		ubp->ub_frame.rsvd = 0;
8702 		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8703 		    F_CTL_SEQ_INITIATIVE;
8704 		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8705 		ubp->ub_frame.seq_cnt = 0;
8706 		ubp->ub_frame.df_ctl = 0;
8707 		ubp->ub_frame.seq_id = 0;
8708 		ubp->ub_frame.rx_id = 0xffff;
8709 		ubp->ub_frame.ox_id = 0xffff;
8710 
8711 		/* set payload. */
8712 		payload = (la_els_logo_t *)ubp->ub_buffer;
8713 		bzero(payload, sizeof (la_els_logo_t));
8714 		/* Make sure ls_code in payload is always big endian */
8715 		ubp->ub_buffer[0] = LA_ELS_LOGO;
8716 		ubp->ub_buffer[1] = 0;
8717 		ubp->ub_buffer[2] = 0;
8718 		ubp->ub_buffer[3] = 0;
8719 		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
8720 		    &payload->nport_ww_name.raw_wwn[0], 8);
8721 		payload->nport_id.port_id = tq->d_id.b24;
8722 
8723 		QL_UB_LOCK(ha);
8724 		sp->flags |= SRB_UB_CALLBACK;
8725 		QL_UB_UNLOCK(ha);
8726 		if (tq->lun_queues.first != NULL) {
8727 			sp->lun_queue = (tq->lun_queues.first)->base_address;
8728 		} else {
8729 			sp->lun_queue = ql_lun_queue(vha, tq, 0);
8730 		}
8731 		if (done_q) {
8732 			ql_add_link_b(done_q, &sp->cmd);
8733 		} else {
8734 			ql_awaken_task_daemon(ha, sp, 0, 0);
8735 		}
8736 	}
8737 
8738 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8739 }
8740 
8741 static int
8742 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8743 {
8744 	port_id_t	d_id;
8745 	ql_srb_t	*sp;
8746 	ql_link_t	*link;
8747 	int		sendup = 1;
8748 
8749 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8750 
8751 	DEVICE_QUEUE_LOCK(tq);
8752 	if (tq->outcnt) {
8753 		DEVICE_QUEUE_UNLOCK(tq);
8754 		sendup = 0;
8755 		(void) ql_abort_device(ha, tq, 1);
8756 		ql_delay(ha, 10000);
8757 	} else {
8758 		DEVICE_QUEUE_UNLOCK(tq);
8759 		TASK_DAEMON_LOCK(ha);
8760 
8761 		for (link = ha->pha->callback_queue.first; link != NULL;
8762 		    link = link->next) {
8763 			sp = link->base_address;
8764 			if (sp->flags & SRB_UB_CALLBACK) {
8765 				continue;
8766 			}
8767 			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8768 
8769 			if (tq->d_id.b24 == d_id.b24) {
8770 				sendup = 0;
8771 				break;
8772 			}
8773 		}
8774 
8775 		TASK_DAEMON_UNLOCK(ha);
8776 	}
8777 
8778 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8779 
8780 	return (sendup);
8781 }
8782 
8783 static int
8784 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8785 {
8786 	fc_unsol_buf_t		*ubp;
8787 	ql_srb_t		*sp;
8788 	la_els_logi_t		*payload;
8789 	class_svc_param_t	*class3_param;
8790 
8791 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8792 
8793 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8794 	    LOOP_DOWN)) {
8795 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8796 		return (QL_FUNCTION_FAILED);
8797 	}
8798 
8799 	/* Locate a buffer to use. */
8800 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8801 	if (ubp == NULL) {
8802 		EL(ha, "Failed\n");
8803 		return (QL_FUNCTION_FAILED);
8804 	}
8805 
8806 	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8807 	    ha->instance, tq->d_id.b24);
8808 
8809 	EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8810 
8811 	sp = ubp->ub_fca_private;
8812 
8813 	/* Set header. */
8814 	ubp->ub_frame.d_id = ha->d_id.b24;
8815 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8816 	ubp->ub_frame.s_id = tq->d_id.b24;
8817 	ubp->ub_frame.rsvd = 0;
8818 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8819 	    F_CTL_SEQ_INITIATIVE;
8820 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8821 	ubp->ub_frame.seq_cnt = 0;
8822 	ubp->ub_frame.df_ctl = 0;
8823 	ubp->ub_frame.seq_id = 0;
8824 	ubp->ub_frame.rx_id = 0xffff;
8825 	ubp->ub_frame.ox_id = 0xffff;
8826 
8827 	/* set payload. */
8828 	payload = (la_els_logi_t *)ubp->ub_buffer;
8829 	bzero(payload, sizeof (payload));
8830 
8831 	payload->ls_code.ls_code = LA_ELS_PLOGI;
8832 	payload->common_service.fcph_version = 0x2006;
8833 	payload->common_service.cmn_features = 0x8800;
8834 
8835 	CFG_IST(ha, CFG_CTRL_24258081) ?
8836 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8837 	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8838 	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8839 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8840 	    ha->init_ctrl_blk.cb.max_frame_length[0],
8841 	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8842 
8843 	payload->common_service.conc_sequences = 0xff;
8844 	payload->common_service.relative_offset = 0x03;
8845 	payload->common_service.e_d_tov = 0x7d0;
8846 
8847 	bcopy((void *)&tq->port_name[0],
8848 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8849 
8850 	bcopy((void *)&tq->node_name[0],
8851 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8852 
8853 	class3_param = (class_svc_param_t *)&payload->class_3;
8854 	class3_param->class_valid_svc_opt = 0x8000;
8855 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8856 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8857 	class3_param->conc_sequences = tq->class3_conc_sequences;
8858 	class3_param->open_sequences_per_exch =
8859 	    tq->class3_open_sequences_per_exch;
8860 
8861 	QL_UB_LOCK(ha);
8862 	sp->flags |= SRB_UB_CALLBACK;
8863 	QL_UB_UNLOCK(ha);
8864 
8865 	ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8866 
8867 	if (done_q) {
8868 		ql_add_link_b(done_q, &sp->cmd);
8869 	} else {
8870 		ql_awaken_task_daemon(ha, sp, 0, 0);
8871 	}
8872 
8873 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8874 
8875 	return (QL_SUCCESS);
8876 }
8877 
8878 /*
8879  * Abort outstanding commands in the Firmware, clear internally
8880  * queued commands in the driver, Synchronize the target with
8881  * the Firmware
8882  */
int
ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
{
	ql_link_t	*link, *link2;
	ql_lun_t	*lq;
	int		rval = QL_SUCCESS;
	ql_srb_t	*sp;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/*
	 * First clear, internally queued commands
	 */
	DEVICE_QUEUE_LOCK(tq);
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;

		/* Advance link2 before removal so the walk stays valid. */
		link2 = lq->cmd.first;
		while (link2 != NULL) {
			sp = link2->base_address;
			link2 = link2->next;

			/* Commands already being aborted stay queued. */
			if (sp->flags & SRB_ABORT) {
				continue;
			}

			/* Remove srb from device command queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_ABORTED;

			/* Call done routine to handle completions. */
			ql_add_link_b(&done_q, &sp->cmd);
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	/* Complete the aborted commands outside the device queue lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	/*
	 * Then abort commands already with the firmware, but only for
	 * a valid logged-in target and only when draining is requested.
	 */
	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
		rval = ql_abort_target(ha, tq, 0);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
8941 
8942 /*
8943  * ql_rcv_rscn_els
8944  *	Processes received RSCN extended link service.
8945  *
8946  * Input:
8947  *	ha:	adapter state pointer.
8948  *	mb:	array containing input mailbox registers.
8949  *	done_q:	done queue pointer.
8950  *
8951  * Context:
8952  *	Interrupt or Kernel context, no mailbox commands allowed.
8953  */
8954 void
8955 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8956 {
8957 	fc_unsol_buf_t		*ubp;
8958 	ql_srb_t		*sp;
8959 	fc_rscn_t		*rn;
8960 	fc_affected_id_t	*af;
8961 	port_id_t		d_id;
8962 
8963 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8964 
8965 	/* Locate a buffer to use. */
8966 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8967 	if (ubp != NULL) {
8968 		sp = ubp->ub_fca_private;
8969 
8970 		/* Set header. */
8971 		ubp->ub_frame.d_id = ha->d_id.b24;
8972 		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8973 		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8974 		ubp->ub_frame.rsvd = 0;
8975 		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8976 		    F_CTL_SEQ_INITIATIVE;
8977 		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8978 		ubp->ub_frame.seq_cnt = 0;
8979 		ubp->ub_frame.df_ctl = 0;
8980 		ubp->ub_frame.seq_id = 0;
8981 		ubp->ub_frame.rx_id = 0xffff;
8982 		ubp->ub_frame.ox_id = 0xffff;
8983 
8984 		/* set payload. */
8985 		rn = (fc_rscn_t *)ubp->ub_buffer;
8986 		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8987 
8988 		rn->rscn_code = LA_ELS_RSCN;
8989 		rn->rscn_len = 4;
8990 		rn->rscn_payload_len = 8;
8991 		d_id.b.al_pa = LSB(mb[2]);
8992 		d_id.b.area = MSB(mb[2]);
8993 		d_id.b.domain =	LSB(mb[1]);
8994 		af->aff_d_id = d_id.b24;
8995 		af->aff_format = MSB(mb[1]);
8996 
8997 		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8998 		    af->aff_d_id);
8999 
9000 		ql_update_rscn(ha, af);
9001 
9002 		QL_UB_LOCK(ha);
9003 		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
9004 		QL_UB_UNLOCK(ha);
9005 		ql_add_link_b(done_q, &sp->cmd);
9006 	}
9007 
9008 	if (ubp == NULL) {
9009 		EL(ha, "Failed, get_unsolicited_buffer\n");
9010 	} else {
9011 		/*EMPTY*/
9012 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9013 	}
9014 }
9015 
9016 /*
9017  * ql_update_rscn
9018  *	Update devices from received RSCN.
9019  *
9020  * Input:
9021  *	ha:	adapter state pointer.
9022  *	af:	pointer to RSCN data.
9023  *
9024  * Context:
9025  *	Interrupt or Kernel context, no mailbox commands allowed.
9026  */
9027 static void
9028 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9029 {
9030 	ql_link_t	*link;
9031 	uint16_t	index;
9032 	ql_tgt_t	*tq;
9033 
9034 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9035 
9036 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9037 		port_id_t d_id;
9038 
9039 		d_id.r.rsvd_1 = 0;
9040 		d_id.b24 = af->aff_d_id;
9041 
9042 		tq = ql_d_id_to_queue(ha, d_id);
9043 		if (tq) {
9044 			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9045 			DEVICE_QUEUE_LOCK(tq);
9046 			tq->flags |= TQF_RSCN_RCVD;
9047 			DEVICE_QUEUE_UNLOCK(tq);
9048 		}
9049 		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
9050 		    ha->instance);
9051 
9052 		return;
9053 	}
9054 
9055 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9056 		for (link = ha->dev[index].first; link != NULL;
9057 		    link = link->next) {
9058 			tq = link->base_address;
9059 
9060 			switch (af->aff_format) {
9061 			case FC_RSCN_FABRIC_ADDRESS:
9062 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9063 					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9064 					    tq->d_id.b24);
9065 					DEVICE_QUEUE_LOCK(tq);
9066 					tq->flags |= TQF_RSCN_RCVD;
9067 					DEVICE_QUEUE_UNLOCK(tq);
9068 				}
9069 				break;
9070 
9071 			case FC_RSCN_AREA_ADDRESS:
9072 				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9073 					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9074 					    tq->d_id.b24);
9075 					DEVICE_QUEUE_LOCK(tq);
9076 					tq->flags |= TQF_RSCN_RCVD;
9077 					DEVICE_QUEUE_UNLOCK(tq);
9078 				}
9079 				break;
9080 
9081 			case FC_RSCN_DOMAIN_ADDRESS:
9082 				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9083 					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9084 					    tq->d_id.b24);
9085 					DEVICE_QUEUE_LOCK(tq);
9086 					tq->flags |= TQF_RSCN_RCVD;
9087 					DEVICE_QUEUE_UNLOCK(tq);
9088 				}
9089 				break;
9090 
9091 			default:
9092 				break;
9093 			}
9094 		}
9095 	}
9096 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9097 }
9098 
9099 /*
9100  * ql_process_rscn
9101  *
9102  * Input:
9103  *	ha:	adapter state pointer.
9104  *	af:	RSCN payload pointer.
9105  *
9106  * Context:
9107  *	Kernel context.
9108  */
9109 static int
9110 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9111 {
9112 	int		sendit;
9113 	int		sendup = 1;
9114 	ql_link_t	*link;
9115 	uint16_t	index;
9116 	ql_tgt_t	*tq;
9117 
9118 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9119 
9120 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9121 		port_id_t d_id;
9122 
9123 		d_id.r.rsvd_1 = 0;
9124 		d_id.b24 = af->aff_d_id;
9125 
9126 		tq = ql_d_id_to_queue(ha, d_id);
9127 		if (tq) {
9128 			sendup = ql_process_rscn_for_device(ha, tq);
9129 		}
9130 
9131 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9132 
9133 		return (sendup);
9134 	}
9135 
9136 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9137 		for (link = ha->dev[index].first; link != NULL;
9138 		    link = link->next) {
9139 
9140 			tq = link->base_address;
9141 			if (tq == NULL) {
9142 				continue;
9143 			}
9144 
9145 			switch (af->aff_format) {
9146 			case FC_RSCN_FABRIC_ADDRESS:
9147 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9148 					sendit = ql_process_rscn_for_device(
9149 					    ha, tq);
9150 					if (sendup) {
9151 						sendup = sendit;
9152 					}
9153 				}
9154 				break;
9155 
9156 			case FC_RSCN_AREA_ADDRESS:
9157 				if ((tq->d_id.b24 & 0xffff00) ==
9158 				    af->aff_d_id) {
9159 					sendit = ql_process_rscn_for_device(
9160 					    ha, tq);
9161 
9162 					if (sendup) {
9163 						sendup = sendit;
9164 					}
9165 				}
9166 				break;
9167 
9168 			case FC_RSCN_DOMAIN_ADDRESS:
9169 				if ((tq->d_id.b24 & 0xff0000) ==
9170 				    af->aff_d_id) {
9171 					sendit = ql_process_rscn_for_device(
9172 					    ha, tq);
9173 
9174 					if (sendup) {
9175 						sendup = sendit;
9176 					}
9177 				}
9178 				break;
9179 
9180 			default:
9181 				break;
9182 			}
9183 		}
9184 	}
9185 
9186 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9187 
9188 	return (sendup);
9189 }
9190 
9191 /*
9192  * ql_process_rscn_for_device
9193  *
9194  * Input:
9195  *	ha:	adapter state pointer.
9196  *	tq:	target queue pointer.
9197  *
9198  * Context:
9199  *	Kernel context.
9200  */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	/* 1 = post the RSCN to the transport now, 0 = defer it. */
	int sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Lock is dropped: ql_get_port_database issues mailbox I/O. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		DEVICE_QUEUE_UNLOCK(tq);

		/* Abort outstanding I/O before deciding whether to post. */
		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Commands still outstanding: keep RSCN_RCVD and defer. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9258 
/*
 * ql_handle_rscn_update
 *	Refreshes the device database after an RSCN: pulls the firmware's
 *	d_id list, creates device queues for newly appeared devices, and
 *	emulates a PLOGI up to the transport for each valid new device.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int			rval;
	ql_tgt_t		*tq;
	uint16_t		index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	port_id_t		d_id;
	ql_mbx_data_t		mr;
	ql_head_t		done_q = { NULL, NULL };

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/*
	 * NOTE(review): KM_SLEEP allocations block until memory is
	 * available and do not return NULL, so this check appears to be
	 * defensive dead code — confirm before removing.
	 */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Already known devices need no new queue. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Lock dropped: port database fetch does mailbox I/O. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* Run the queued PLOGI callbacks outside the state lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
9348 
9349 /*
9350  * ql_free_unsolicited_buffer
9351  *	Frees allocated buffer.
9352  *
9353  * Input:
9354  *	ha = adapter state pointer.
9355  *	index = buffer array index.
9356  *	ADAPTER_STATE_LOCK must be already obtained.
9357  *
9358  * Context:
9359  *	Kernel context.
9360  */
9361 static void
9362 ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
9363 {
9364 	ql_srb_t	*sp;
9365 	int		status;
9366 
9367 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9368 
9369 	sp = ubp->ub_fca_private;
9370 	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9371 		/* Disconnect IP from system buffers. */
9372 		if (ha->flags & IP_INITIALIZED) {
9373 			ADAPTER_STATE_UNLOCK(ha);
9374 			status = ql_shutdown_ip(ha);
9375 			ADAPTER_STATE_LOCK(ha);
9376 			if (status != QL_SUCCESS) {
9377 				cmn_err(CE_WARN,
9378 				    "!Qlogic %s(%d): Failed to shutdown IP",
9379 				    QL_NAME, ha->instance);
9380 				return;
9381 			}
9382 
9383 			ha->flags &= ~IP_ENABLED;
9384 		}
9385 
9386 		ql_free_phys(ha, &sp->ub_buffer);
9387 	} else {
9388 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
9389 	}
9390 
9391 	kmem_free(sp, sizeof (ql_srb_t));
9392 	kmem_free(ubp, sizeof (fc_unsol_buf_t));
9393 
9394 	if (ha->ub_allocated != 0) {
9395 		ha->ub_allocated--;
9396 	}
9397 
9398 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9399 }
9400 
9401 /*
9402  * ql_get_unsolicited_buffer
9403  *	Locates a free unsolicited buffer.
9404  *
9405  * Input:
9406  *	ha = adapter state pointer.
9407  *	type = buffer type.
9408  *
9409  * Returns:
9410  *	Unsolicited buffer pointer.
9411  *
9412  * Context:
9413  *	Interrupt or Kernel context, no mailbox commands allowed.
9414  */
9415 fc_unsol_buf_t *
9416 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9417 {
9418 	fc_unsol_buf_t	*ubp;
9419 	ql_srb_t	*sp;
9420 	uint16_t	index;
9421 
9422 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9423 
9424 	/* Locate a buffer to use. */
9425 	ubp = NULL;
9426 
9427 	QL_UB_LOCK(ha);
9428 	for (index = 0; index < QL_UB_LIMIT; index++) {
9429 		ubp = ha->ub_array[index];
9430 		if (ubp != NULL) {
9431 			sp = ubp->ub_fca_private;
9432 			if ((sp->ub_type == type) &&
9433 			    (sp->flags & SRB_UB_IN_FCA) &&
9434 			    (!(sp->flags & (SRB_UB_CALLBACK |
9435 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9436 				sp->flags |= SRB_UB_ACQUIRED;
9437 				ubp->ub_resp_flags = 0;
9438 				break;
9439 			}
9440 			ubp = NULL;
9441 		}
9442 	}
9443 	QL_UB_UNLOCK(ha);
9444 
9445 	if (ubp) {
9446 		ubp->ub_resp_token = NULL;
9447 		ubp->ub_class = FC_TRAN_CLASS3;
9448 	}
9449 
9450 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9451 
9452 	return (ubp);
9453 }
9454 
9455 /*
9456  * ql_ub_frame_hdr
9457  *	Processes received unsolicited buffers from ISP.
9458  *
9459  * Input:
9460  *	ha:	adapter state pointer.
9461  *	tq:	target queue pointer.
9462  *	index:	unsolicited buffer array index.
9463  *	done_q:	done queue pointer.
9464  *
9465  * Returns:
9466  *	ql local function return status code.
9467  *
9468  * Context:
9469  *	Interrupt or Kernel context, no mailbox commands allowed.
9470  */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Validate the buffer index while holding the UB lock. */
	QL_UB_LOCK(ha);
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	sp = ubp->ub_fca_private;
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		/*
		 * Buffer is being torn down: strip its in-flight state
		 * and return it to the FCA pool instead of delivering.
		 */
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/*
	 * Deliverable only if the SRB really belongs to this slot, is
	 * owned by the ISP, carries IP (IS8802 SNAP) data, and is not
	 * already acquired by the driver.
	 */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Synthesize an FC frame header for the ULP. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Clamp this segment to what remains of the sequence. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance per-target sequence bookkeeping. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		/*
		 * Set F_CTL: mark first/last frame of the sequence; a
		 * single-frame sequence gets both bits.
		 */
		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		/* Queue the buffer for the ULP callback. */
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Log exactly which precondition failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9594 
9595 /*
9596  * ql_timer
9597  *	One second timer function.
9598  *
9599  * Input:
9600  *	ql_hba.first = first link in adapter list.
9601  *
9602  * Context:
9603  *	Interrupt context, no mailbox commands allowed.
9604  */
9605 static void
9606 ql_timer(void *arg)
9607 {
9608 	ql_link_t		*link;
9609 	uint32_t		set_flags;
9610 	uint32_t		reset_flags;
9611 	ql_adapter_state_t	*ha = NULL, *vha;
9612 
9613 	QL_PRINT_6(CE_CONT, "started\n");
9614 
9615 	/* Acquire global state lock. */
9616 	GLOBAL_STATE_LOCK();
9617 	if (ql_timer_timeout_id == NULL) {
9618 		/* Release global state lock. */
9619 		GLOBAL_STATE_UNLOCK();
9620 		return;
9621 	}
9622 
9623 	for (link = ql_hba.first; link != NULL; link = link->next) {
9624 		ha = link->base_address;
9625 
9626 		/* Skip adapter if suspended of stalled. */
9627 		ADAPTER_STATE_LOCK(ha);
9628 		if (ha->flags & ADAPTER_SUSPENDED ||
9629 		    ha->task_daemon_flags & DRIVER_STALL) {
9630 			ADAPTER_STATE_UNLOCK(ha);
9631 			continue;
9632 		}
9633 		ha->flags |= ADAPTER_TIMER_BUSY;
9634 		ADAPTER_STATE_UNLOCK(ha);
9635 
9636 		QL_PM_LOCK(ha);
9637 		if (ha->power_level != PM_LEVEL_D0) {
9638 			QL_PM_UNLOCK(ha);
9639 
9640 			ADAPTER_STATE_LOCK(ha);
9641 			ha->flags &= ~ADAPTER_TIMER_BUSY;
9642 			ADAPTER_STATE_UNLOCK(ha);
9643 			continue;
9644 		}
9645 		ha->busy++;
9646 		QL_PM_UNLOCK(ha);
9647 
9648 		set_flags = 0;
9649 		reset_flags = 0;
9650 
9651 		/* Port retry timer handler. */
9652 		if (LOOP_READY(ha)) {
9653 			ADAPTER_STATE_LOCK(ha);
9654 			if (ha->port_retry_timer != 0) {
9655 				ha->port_retry_timer--;
9656 				if (ha->port_retry_timer == 0) {
9657 					set_flags |= PORT_RETRY_NEEDED;
9658 				}
9659 			}
9660 			ADAPTER_STATE_UNLOCK(ha);
9661 		}
9662 
9663 		/* Loop down timer handler. */
9664 		if (LOOP_RECONFIGURE(ha) == 0) {
9665 			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9666 				ha->loop_down_timer--;
9667 				/*
9668 				 * give the firmware loop down dump flag
9669 				 * a chance to work.
9670 				 */
9671 				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9672 					if (CFG_IST(ha,
9673 					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9674 						(void) ql_binary_fw_dump(ha,
9675 						    TRUE);
9676 					}
9677 					EL(ha, "loop_down_reset, "
9678 					    "isp_abort_needed\n");
9679 					set_flags |= ISP_ABORT_NEEDED;
9680 				}
9681 			}
9682 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9683 				/* Command abort time handler. */
9684 				if (ha->loop_down_timer ==
9685 				    ha->loop_down_abort_time) {
9686 					ADAPTER_STATE_LOCK(ha);
9687 					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9688 					ADAPTER_STATE_UNLOCK(ha);
9689 					set_flags |= ABORT_QUEUES_NEEDED;
9690 					EL(ha, "loop_down_abort_time, "
9691 					    "abort_queues_needed\n");
9692 				}
9693 
9694 				/* Watchdog timer handler. */
9695 				if (ha->watchdog_timer == 0) {
9696 					ha->watchdog_timer = WATCHDOG_TIME;
9697 				} else if (LOOP_READY(ha)) {
9698 					ha->watchdog_timer--;
9699 					if (ha->watchdog_timer == 0) {
9700 						for (vha = ha; vha != NULL;
9701 						    vha = vha->vp_next) {
9702 							ql_watchdog(vha,
9703 							    &set_flags,
9704 							    &reset_flags);
9705 						}
9706 						ha->watchdog_timer =
9707 						    WATCHDOG_TIME;
9708 					}
9709 				}
9710 			}
9711 		}
9712 
9713 		/* Idle timer handler. */
9714 		if (!DRIVER_SUSPENDED(ha)) {
9715 			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9716 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9717 				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9718 #endif
9719 				ha->idle_timer = 0;
9720 			}
9721 			if (ha->send_plogi_timer != NULL) {
9722 				ha->send_plogi_timer--;
9723 				if (ha->send_plogi_timer == NULL) {
9724 					set_flags |= SEND_PLOGI;
9725 				}
9726 			}
9727 		}
9728 		ADAPTER_STATE_LOCK(ha);
9729 		if (ha->idc_restart_timer != 0) {
9730 			ha->idc_restart_timer--;
9731 			if (ha->idc_restart_timer == 0) {
9732 				ha->idc_restart_cnt = 0;
9733 				reset_flags |= DRIVER_STALL;
9734 			}
9735 		}
9736 		if (ha->idc_flash_acc_timer != 0) {
9737 			ha->idc_flash_acc_timer--;
9738 			if (ha->idc_flash_acc_timer == 0 &&
9739 			    ha->idc_flash_acc != 0) {
9740 				ha->idc_flash_acc = 1;
9741 				ha->idc_mb[0] = MBA_IDC_NOTIFICATION;
9742 				ha->idc_mb[1] = 0;
9743 				ha->idc_mb[2] = IDC_OPC_DRV_START;
9744 				set_flags |= IDC_EVENT;
9745 			}
9746 		}
9747 		ADAPTER_STATE_UNLOCK(ha);
9748 
9749 		if (set_flags != 0 || reset_flags != 0) {
9750 			ql_awaken_task_daemon(ha, NULL, set_flags,
9751 			    reset_flags);
9752 		}
9753 
9754 		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9755 			ql_blink_led(ha);
9756 		}
9757 
9758 		/* Update the IO stats */
9759 		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9760 			ha->xioctl->IOInputMByteCnt +=
9761 			    (ha->xioctl->IOInputByteCnt / 0x100000);
9762 			ha->xioctl->IOInputByteCnt %= 0x100000;
9763 		}
9764 
9765 		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9766 			ha->xioctl->IOOutputMByteCnt +=
9767 			    (ha->xioctl->IOOutputByteCnt / 0x100000);
9768 			ha->xioctl->IOOutputByteCnt %= 0x100000;
9769 		}
9770 
9771 		if (CFG_IST(ha, CFG_CTRL_8021)) {
9772 			(void) ql_8021_idc_handler(ha);
9773 		}
9774 
9775 		ADAPTER_STATE_LOCK(ha);
9776 		ha->flags &= ~ADAPTER_TIMER_BUSY;
9777 		ADAPTER_STATE_UNLOCK(ha);
9778 
9779 		QL_PM_LOCK(ha);
9780 		ha->busy--;
9781 		QL_PM_UNLOCK(ha);
9782 	}
9783 
9784 	/* Restart timer, if not being stopped. */
9785 	if (ql_timer_timeout_id != NULL) {
9786 		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9787 	}
9788 
9789 	/* Release global state lock. */
9790 	GLOBAL_STATE_UNLOCK();
9791 
9792 	QL_PRINT_6(CE_CONT, "done\n");
9793 }
9794 
9795 /*
9796  * ql_timeout_insert
9797  *	Function used to insert a command block onto the
9798  *	watchdog timer queue.
9799  *
 *	Note: Must ensure that pkt_time is not zero
 *			before calling ql_timeout_insert.
9802  *
9803  * Input:
9804  *	ha:	adapter state pointer.
9805  *	tq:	target queue pointer.
9806  *	sp:	SRB pointer.
9807  *	DEVICE_QUEUE_LOCK must be already obtained.
9808  *
9809  * Context:
9810  *	Kernel context.
9811  */
9812 /* ARGSUSED */
9813 static void
9814 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9815 {
9816 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9817 
9818 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9819 		sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9820 		/*
9821 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9822 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9823 		 * will expire in the next watchdog call, which could be in
9824 		 * 1 microsecond.
9825 		 *
9826 		 */
9827 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9828 		    WATCHDOG_TIME;
9829 		/*
9830 		 * Added an additional 10 to account for the
9831 		 * firmware timer drift which can occur with
9832 		 * very long timeout values.
9833 		 */
9834 		sp->wdg_q_time += 10;
9835 
9836 		/*
9837 		 * Add 6 more to insure watchdog does not timeout at the same
9838 		 * time as ISP RISC code timeout.
9839 		 */
9840 		sp->wdg_q_time += 6;
9841 
9842 		/* Save initial time for resetting watchdog time. */
9843 		sp->init_wdg_q_time = sp->wdg_q_time;
9844 
9845 		/* Insert command onto watchdog queue. */
9846 		ql_add_link_b(&tq->wdg, &sp->wdg);
9847 
9848 		sp->flags |= SRB_WATCHDOG_ENABLED;
9849 	} else {
9850 		sp->isp_timeout = 0;
9851 		sp->wdg_q_time = 0;
9852 		sp->init_wdg_q_time = 0;
9853 	}
9854 
9855 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9856 }
9857 
9858 /*
9859  * ql_watchdog
9860  *	Timeout handler that runs in interrupt context. The
9861  *	ql_adapter_state_t * argument is the parameter set up when the
9862  *	timeout was initialized (state structure pointer).
9863  *	Function used to update timeout values and if timeout
9864  *	has occurred command will be aborted.
9865  *
9866  * Input:
9867  *	ha:		adapter state pointer.
9868  *	set_flags:	task daemon flags to set.
9869  *	reset_flags:	task daemon flags to reset.
9870  *
9871  * Context:
9872  *	Interrupt context, no mailbox commands allowed.
9873  */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/* Try to acquire device queue lock. */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				/*
				 * Lock is busy (we run in interrupt
				 * context and cannot block): abandon the
				 * rest of this hash chain this tick.
				 */
				next_device = NULL;
				continue;
			}

			next_device = link->next;

			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/* Loop through commands on watchdog queue. */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					/* Expired: take it off the queue. */
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					if (sp->flags & SRB_ISP_STARTED) {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						/*
						 * Command was in the ISP:
						 * drop the lock and force
						 * both loops to terminate
						 * (tq = NULL also skips the
						 * unlock below).
						 */
						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/* Release device queue lock. */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9973 
9974 /*
9975  * ql_cmd_timeout
9976  *	Command timeout handler.
9977  *
9978  * Input:
9979  *	ha:		adapter state pointer.
9980  *	tq:		target queue pointer.
9981  *	sp:		SRB pointer.
9982  *	set_flags:	task daemon flags to set.
9983  *	reset_flags:	task daemon flags to reset.
9984  *
9985  * Context:
9986  *	Interrupt context, no mailbox commands allowed.
9987  */
9988 /* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * Case 1: command never reached the ISP - it timed out
		 * while still queued inside the driver.  Complete it
		 * with CS_TIMEOUT; no hardware recovery is needed.
		 */
		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		/* Caller expects the device queue lock held on return. */
		DEVICE_QUEUE_LOCK(tq);
	} else if (CFG_IST(ha, CFG_CTRL_8021)) {
		/*
		 * Case 2 (8021 controllers): command is in the ISP.
		 * Reclaim the outstanding-command slot, try a targeted
		 * abort, and only escalate to a full ISP abort if the
		 * abort itself fails or too many timeouts accumulated.
		 */
		int		rval;
		uint32_t	index;

		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		if (sp->handle) {
			ha->pha->timeout_cnt++;
			index = sp->handle & OSC_INDEX_MASK;
			if (ha->pha->outstanding_cmds[index] == sp) {
				/* Neutralize the ring entry and free slot. */
				sp->request_ring_ptr->entry_type =
				    INVALID_ENTRY_TYPE;
				sp->request_ring_ptr->entry_count = 0;
				ha->pha->outstanding_cmds[index] = 0;
			}
			INTR_UNLOCK(ha);

			rval = ql_abort_command(ha, sp);
			if (rval == QL_FUNCTION_TIMEOUT ||
			    rval == QL_LOCK_TIMEOUT ||
			    rval == QL_FUNCTION_PARAMETER_ERROR ||
			    ha->pha->timeout_cnt > TIMEOUT_THRESHOLD) {
				*set_flags |= ISP_ABORT_NEEDED;
				EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
				    "needed\n", rval, ha->pha->timeout_cnt);
			}

			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		} else {
			INTR_UNLOCK(ha);
		}

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		DEVICE_QUEUE_LOCK(tq);

	} else {
		/*
		 * Case 3: command is in the ISP on other controllers -
		 * the only recovery is a full ISP abort; the command is
		 * completed later by the abort path.
		 */
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh, isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		*set_flags |= ISP_ABORT_NEEDED;

		DEVICE_QUEUE_LOCK(tq);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10103 
10104 /*
10105  * ql_rst_aen
10106  *	Processes asynchronous reset.
10107  *
10108  * Input:
10109  *	ha = adapter state pointer.
10110  *
10111  * Context:
10112  *	Kernel context.
10113  */
static void
ql_rst_aen(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Issue marker command. */
	/* MK_SYNC_ALL presumably resynchronizes all targets/LUNs - verify. */
	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10124 
10125 /*
10126  * ql_cmd_wait
10127  *	Stall driver until all outstanding commands are returned.
10128  *
10129  * Input:
10130  *	ha = adapter state pointer.
10131  *
10132  * Context:
10133  *	Kernel context.
10134  */
10135 void
10136 ql_cmd_wait(ql_adapter_state_t *ha)
10137 {
10138 	uint16_t		index;
10139 	ql_link_t		*link;
10140 	ql_tgt_t		*tq;
10141 	ql_adapter_state_t	*vha;
10142 
10143 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10144 
10145 	/* Wait for all outstanding commands to be returned. */
10146 	(void) ql_wait_outstanding(ha);
10147 
10148 	/*
10149 	 * clear out internally queued commands
10150 	 */
10151 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
10152 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10153 			for (link = vha->dev[index].first; link != NULL;
10154 			    link = link->next) {
10155 				tq = link->base_address;
10156 				if (tq &&
10157 				    (!(tq->prli_svc_param_word_3 &
10158 				    PRLI_W3_RETRY))) {
10159 					(void) ql_abort_device(vha, tq, 0);
10160 				}
10161 			}
10162 		}
10163 	}
10164 
10165 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10166 }
10167 
10168 /*
10169  * ql_wait_outstanding
10170  *	Wait for all outstanding commands to complete.
10171  *
10172  * Input:
10173  *	ha = adapter state pointer.
10174  *
10175  * Returns:
10176  *	index - the index for ql_srb into outstanding_cmds.
10177  *
10178  * Context:
10179  *	Kernel context.
10180  */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* count bounds the total number of 10ms delays we will take. */
	count = ql_osc_wait_count;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/* Push any internally pending commands to the ISP first. */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			index = 1;
		}
		/* Timed-out commands are excluded from the wait. */
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				ql_delay(ha, 10000);
				/* restart scan: loop increment makes this 1 */
				index = 0;
			} else {
				/* Patience exhausted; report stuck slot. */
				EL(ha, "failed, sp=%ph, oci=%d, hdl=%xh\n",
				    (void *)sp, index, sp->handle);
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (index);
}
10212 
10213 /*
10214  * ql_restart_queues
10215  *	Restart device queues.
10216  *
10217  * Input:
10218  *	ha = adapter state pointer.
10219  *	DEVICE_QUEUE_LOCK must be released.
10220  *
10221  * Context:
10222  *	Interrupt or Kernel context, no mailbox commands allowed.
10223  */
static void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Walk every target on every port of the physical adapter. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				/* Restart any LUN with queued commands. */
				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					if (lq->cmd.first != NULL) {
						ql_next(vha, lq);
						/*
						 * NOTE(review): ql_next()
						 * appears to drop the device
						 * queue lock - re-acquire;
						 * confirm against ql_next.
						 */
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10264 
10265 /*
10266  * ql_iidma
10267  *	Setup iiDMA parameters to firmware
10268  *
10269  * Input:
10270  *	ha = adapter state pointer.
10271  *	DEVICE_QUEUE_LOCK must be released.
10272  *
10273  * Context:
10274  *	Interrupt or Kernel context, no mailbox commands allowed.
10275  */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* iiDMA applies only to 24xx/25xx/81xx class controllers. */
	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Skip fabric/control handles and undefined rates. */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Get the iiDMA persistent data */
			if (tq->iidma_rate == IIDMA_RATE_INIT) {
				/* Property name is keyed by the port WWN. */
				(void) sprintf(buf,
				    "iidma-rate-%02x%02x%02x%02x%02x"
				    "%02x%02x%02x", tq->port_name[0],
				    tq->port_name[1], tq->port_name[2],
				    tq->port_name[3], tq->port_name[4],
				    tq->port_name[5], tq->port_name[6],
				    tq->port_name[7]);

				if ((data = ql_get_prop(ha, buf)) ==
				    0xffffffff) {
					/* No property configured. */
					tq->iidma_rate = IIDMA_RATE_NDEF;
				} else {
					/* Validate the configured rate. */
					switch (data) {
					case IIDMA_RATE_1GB:
					case IIDMA_RATE_2GB:
					case IIDMA_RATE_4GB:
					case IIDMA_RATE_10GB:
						tq->iidma_rate = data;
						break;
					case IIDMA_RATE_8GB:
						/* 8Gb only on 25xx parts. */
						if (CFG_IST(ha,
						    CFG_CTRL_25XX)) {
							tq->iidma_rate = data;
						} else {
							tq->iidma_rate =
							    IIDMA_RATE_4GB;
						}
						break;
					default:
						EL(ha, "invalid data for "
						    "parameter: %s: %xh\n",
						    buf, data);
						tq->iidma_rate =
						    IIDMA_RATE_NDEF;
						break;
					}
				}
			}

			/* Set the firmware's iiDMA rate */
			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
			    !(CFG_IST(ha, CFG_CTRL_8081))) {
				data = ql_iidma_rate(ha, tq->loop_id,
				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
				if (data != QL_SUCCESS) {
					EL(ha, "mbx failed: %xh\n", data);
				}
			}

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10371 
10372 /*
10373  * ql_abort_queues
10374  *	Abort all commands on device queues.
10375  *
10376  * Input:
10377  *	ha = adapter state pointer.
10378  *
10379  * Context:
10380  *	Interrupt or Kernel context, no mailbox commands allowed.
10381  */
static void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		if (ha->pending_cmds.first != NULL) {
			/*
			 * Drain the internal pending queue first (must
			 * drop the interrupt lock to do so), then
			 * restart the slot scan from the beginning.
			 */
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* skip devices capable of FCP2 retries */
		if ((sp != NULL) &&
		    ((tq = sp->lun_queue->target_queue) != NULL) &&
		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
			/* Reclaim the slot before completing the command. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Now flush the per-device queues on every port. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
		    vha->instance, vha->vp_index);
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retries */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY)) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10453 
10454 /*
10455  * ql_abort_device_queues
10456  *	Abort all commands on device queues.
10457  *
 * Input:
 *	ha = adapter state pointer.
 *	tq = target queue pointer.
10460  *
10461  * Context:
10462  *	Interrupt or Kernel context, no mailbox commands allowed.
10463  */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/* Walk every LUN queue on this target. */
	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			/* Leave commands already being aborted alone. */
			if (sp->flags & SRB_ABORT) {
				cmd_link = cmd_link->next;
				continue;
			}

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* ql_done() must be called without the lock held. */
			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd);

			/* Delay for system */
			ql_delay(ha, 10000);

			/*
			 * The list may have changed while unlocked;
			 * restart from the head of the LUN queue.
			 */
			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
}
10512 
10513 /*
10514  * ql_loop_resync
10515  *	Resync with fibre channel devices.
10516  *
10517  * Input:
10518  *	ha = adapter state pointer.
10519  *	DEVICE_QUEUE_LOCK must be released.
10520  *
10521  * Returns:
10522  *	ql local function return status code.
10523  *
10524  * Context:
10525  *	Kernel context.
10526  */
10527 static int
10528 ql_loop_resync(ql_adapter_state_t *ha)
10529 {
10530 	int rval;
10531 
10532 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10533 
10534 	if (ha->flags & IP_INITIALIZED) {
10535 		(void) ql_shutdown_ip(ha);
10536 	}
10537 
10538 	rval = ql_fw_ready(ha, 10);
10539 
10540 	TASK_DAEMON_LOCK(ha);
10541 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10542 	TASK_DAEMON_UNLOCK(ha);
10543 
10544 	/* Set loop online, if it really is. */
10545 	if (rval == QL_SUCCESS) {
10546 		ql_loop_online(ha);
10547 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10548 	} else {
10549 		EL(ha, "failed, rval = %xh\n", rval);
10550 	}
10551 
10552 	return (rval);
10553 }
10554 
10555 /*
10556  * ql_loop_online
10557  *	Set loop online status if it really is online.
10558  *
10559  * Input:
10560  *	ha = adapter state pointer.
10561  *	DEVICE_QUEUE_LOCK must be released.
10562  *
10563  * Context:
10564  *	Kernel context.
10565  */
void
ql_loop_online(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Inform the FC Transport that the hardware is online. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		if (!(vha->task_daemon_flags &
		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
			/* Restart IP if it was shutdown. */
			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
			    !(vha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(vha);
				ql_isp_rcvbuf(vha);
			}

			/*
			 * If the port is not already reported up, set
			 * LOOP or ONLINE (preserving the speed bits)
			 * and tell the task daemon to report the change.
			 */
			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
			    FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_ONLINE) {
				vha->state = FC_PORT_SPEED_MASK(vha->state);
				if (vha->topology & QL_LOOP_CONNECTION) {
					vha->state |= FC_STATE_LOOP;
				} else {
					vha->state |= FC_STATE_ONLINE;
				}
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= FC_STATE_CHANGE;
				TASK_DAEMON_UNLOCK(ha);
			}
		}
	}

	ql_awaken_task_daemon(ha, NULL, 0, 0);

	/* Restart device queues that may have been stopped. */
	ql_restart_queues(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10607 
10608 /*
10609  * ql_fca_handle_to_state
10610  *	Verifies handle to be correct.
10611  *
10612  * Input:
10613  *	fca_handle = pointer to state structure.
10614  *
10615  * Returns:
10616  *	NULL = failure
10617  *
10618  * Context:
10619  *	Kernel context.
10620  */
10621 static ql_adapter_state_t *
10622 ql_fca_handle_to_state(opaque_t fca_handle)
10623 {
10624 #ifdef	QL_DEBUG_ROUTINES
10625 	ql_link_t		*link;
10626 	ql_adapter_state_t	*ha = NULL;
10627 	ql_adapter_state_t	*vha = NULL;
10628 
10629 	for (link = ql_hba.first; link != NULL; link = link->next) {
10630 		ha = link->base_address;
10631 		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10632 			if ((opaque_t)vha == fca_handle) {
10633 				ha = vha;
10634 				break;
10635 			}
10636 		}
10637 		if ((opaque_t)ha == fca_handle) {
10638 			break;
10639 		} else {
10640 			ha = NULL;
10641 		}
10642 	}
10643 
10644 	if (ha == NULL) {
10645 		/*EMPTY*/
10646 		QL_PRINT_2(CE_CONT, "failed\n");
10647 	}
10648 
10649 #endif /* QL_DEBUG_ROUTINES */
10650 
10651 	return ((ql_adapter_state_t *)fca_handle);
10652 }
10653 
10654 /*
10655  * ql_d_id_to_queue
10656  *	Locate device queue that matches destination ID.
10657  *
10658  * Input:
10659  *	ha = adapter state pointer.
10660  *	d_id = destination ID
10661  *
10662  * Returns:
10663  *	NULL = failure
10664  *
10665  * Context:
10666  *	Interrupt or Kernel context, no mailbox commands allowed.
10667  */
10668 ql_tgt_t *
10669 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10670 {
10671 	uint16_t	index;
10672 	ql_tgt_t	*tq;
10673 	ql_link_t	*link;
10674 
10675 	/* Get head queue index. */
10676 	index = ql_alpa_to_index[d_id.b.al_pa];
10677 
10678 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10679 		tq = link->base_address;
10680 		if (tq->d_id.b24 == d_id.b24 &&
10681 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10682 			return (tq);
10683 		}
10684 	}
10685 
10686 	return (NULL);
10687 }
10688 
10689 /*
10690  * ql_loop_id_to_queue
10691  *	Locate device queue that matches loop ID.
10692  *
10693  * Input:
10694  *	ha:		adapter state pointer.
10695  *	loop_id:	destination ID
10696  *
10697  * Returns:
10698  *	NULL = failure
10699  *
10700  * Context:
10701  *	Interrupt or Kernel context, no mailbox commands allowed.
10702  */
10703 ql_tgt_t *
10704 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10705 {
10706 	uint16_t	index;
10707 	ql_tgt_t	*tq;
10708 	ql_link_t	*link;
10709 
10710 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10711 		for (link = ha->dev[index].first; link != NULL;
10712 		    link = link->next) {
10713 			tq = link->base_address;
10714 			if (tq->loop_id == loop_id) {
10715 				return (tq);
10716 			}
10717 		}
10718 	}
10719 
10720 	return (NULL);
10721 }
10722 
10723 /*
10724  * ql_kstat_update
10725  *	Updates kernel statistics.
10726  *
10727  * Input:
10728  *	ksp - driver kernel statistics structure pointer.
10729  *	rw - function to perform
10730  *
10731  * Returns:
10732  *	0 or EACCES
10733  *
10734  * Context:
10735  *	Kernel context.
10736  */
10737 /* ARGSUSED */
10738 static int
10739 ql_kstat_update(kstat_t *ksp, int rw)
10740 {
10741 	int			rval;
10742 
10743 	QL_PRINT_3(CE_CONT, "started\n");
10744 
10745 	if (rw == KSTAT_WRITE) {
10746 		rval = EACCES;
10747 	} else {
10748 		rval = 0;
10749 	}
10750 
10751 	if (rval != 0) {
10752 		/*EMPTY*/
10753 		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10754 	} else {
10755 		/*EMPTY*/
10756 		QL_PRINT_3(CE_CONT, "done\n");
10757 	}
10758 	return (rval);
10759 }
10760 
10761 /*
10762  * ql_load_flash
10763  *	Loads flash.
10764  *
10765  * Input:
10766  *	ha:	adapter state pointer.
10767  *	dp:	data pointer.
10768  *	size:	data length.
10769  *
10770  * Returns:
10771  *	ql local function return status code.
10772  *
10773  * Context:
10774  *	Kernel context.
10775  */
10776 int
10777 ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
10778 {
10779 	uint32_t	cnt;
10780 	int		rval;
10781 	uint32_t	size_to_offset;
10782 	uint32_t	size_to_compare;
10783 	int		erase_all;
10784 
10785 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
10786 		return (ql_24xx_load_flash(ha, dp, size, 0));
10787 	}
10788 
10789 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10790 
10791 	size_to_compare = 0x20000;
10792 	size_to_offset = 0;
10793 	erase_all = 0;
10794 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10795 		if (size == 0x80000) {
10796 			/* Request to flash the entire chip. */
10797 			size_to_compare = 0x80000;
10798 			erase_all = 1;
10799 		} else {
10800 			size_to_compare = 0x40000;
10801 			if (ql_flash_sbus_fpga) {
10802 				size_to_offset = 0x40000;
10803 			}
10804 		}
10805 	}
10806 	if (size > size_to_compare) {
10807 		rval = QL_FUNCTION_PARAMETER_ERROR;
10808 		EL(ha, "failed=%xh\n", rval);
10809 		return (rval);
10810 	}
10811 
10812 	GLOBAL_HW_LOCK();
10813 
10814 	/* Enable Flash Read/Write. */
10815 	ql_flash_enable(ha);
10816 
10817 	/* Erase flash prior to write. */
10818 	rval = ql_erase_flash(ha, erase_all);
10819 
10820 	if (rval == QL_SUCCESS) {
10821 		/* Write data to flash. */
10822 		for (cnt = 0; cnt < size; cnt++) {
10823 			/* Allow other system activity. */
10824 			if (cnt % 0x1000 == 0) {
10825 				ql_delay(ha, 10000);
10826 			}
10827 			rval = ql_program_flash_address(ha,
10828 			    cnt + size_to_offset, *dp++);
10829 			if (rval != QL_SUCCESS) {
10830 				break;
10831 			}
10832 		}
10833 	}
10834 
10835 	ql_flash_disable(ha);
10836 
10837 	GLOBAL_HW_UNLOCK();
10838 
10839 	if (rval != QL_SUCCESS) {
10840 		EL(ha, "failed=%xh\n", rval);
10841 	} else {
10842 		/*EMPTY*/
10843 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10844 	}
10845 	return (rval);
10846 }
10847 
10848 /*
10849  * ql_program_flash_address
10850  *	Program flash address.
10851  *
10852  * Input:
10853  *	ha = adapter state pointer.
10854  *	addr = flash byte address.
10855  *	data = data to be written to flash.
10856  *
10857  * Returns:
10858  *	ql local function return status code.
10859  *
10860  * Context:
10861  *	Kernel context.
10862  */
10863 static int
10864 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10865 {
10866 	int rval;
10867 
10868 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10869 
10870 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10871 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10872 		ql_write_flash_byte(ha, addr, data);
10873 	} else {
10874 		/* Write Program Command Sequence */
10875 		ql_write_flash_byte(ha, 0x5555, 0xaa);
10876 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10877 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10878 		ql_write_flash_byte(ha, addr, data);
10879 	}
10880 
10881 	/* Wait for write to complete. */
10882 	rval = ql_poll_flash(ha, addr, data);
10883 
10884 	if (rval != QL_SUCCESS) {
10885 		EL(ha, "failed=%xh\n", rval);
10886 	} else {
10887 		/*EMPTY*/
10888 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10889 	}
10890 	return (rval);
10891 }
10892 
10893 /*
10894  * ql_erase_flash
10895  *	Erases entire flash.
10896  *
10897  * Input:
10898  *	ha = adapter state pointer.
10899  *
10900  * Returns:
10901  *	ql local function return status code.
10902  *
10903  * Context:
10904  *	Kernel context.
10905  */
10906 int
10907 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10908 {
10909 	int		rval;
10910 	uint32_t	erase_delay = 2000000;
10911 	uint32_t	sStartAddr;
10912 	uint32_t	ssize;
10913 	uint32_t	cnt;
10914 	uint8_t		*bfp;
10915 	uint8_t		*tmp;
10916 
10917 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10918 
10919 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10920 
10921 		if (ql_flash_sbus_fpga == 1) {
10922 			ssize = QL_SBUS_FCODE_SIZE;
10923 			sStartAddr = QL_FCODE_OFFSET;
10924 		} else {
10925 			ssize = QL_FPGA_SIZE;
10926 			sStartAddr = QL_FPGA_OFFSET;
10927 		}
10928 
10929 		erase_delay = 20000000;
10930 
10931 		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10932 
10933 		/* Save the section of flash we're not updating to buffer */
10934 		tmp = bfp;
10935 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10936 			/* Allow other system activity. */
10937 			if (cnt % 0x1000 == 0) {
10938 				ql_delay(ha, 10000);
10939 			}
10940 			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10941 		}
10942 	}
10943 
10944 	/* Chip Erase Command Sequence */
10945 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10946 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10947 	ql_write_flash_byte(ha, 0x5555, 0x80);
10948 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10949 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10950 	ql_write_flash_byte(ha, 0x5555, 0x10);
10951 
10952 	ql_delay(ha, erase_delay);
10953 
10954 	/* Wait for erase to complete. */
10955 	rval = ql_poll_flash(ha, 0, 0x80);
10956 
10957 	if (rval != QL_SUCCESS) {
10958 		EL(ha, "failed=%xh\n", rval);
10959 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10960 			kmem_free(bfp, ssize);
10961 		}
10962 		return (rval);
10963 	}
10964 
10965 	/* restore the section we saved in the buffer */
10966 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10967 		/* Restore the section we saved off */
10968 		tmp = bfp;
10969 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10970 			/* Allow other system activity. */
10971 			if (cnt % 0x1000 == 0) {
10972 				ql_delay(ha, 10000);
10973 			}
10974 			rval = ql_program_flash_address(ha, cnt, *tmp++);
10975 			if (rval != QL_SUCCESS) {
10976 				break;
10977 			}
10978 		}
10979 
10980 		kmem_free(bfp, ssize);
10981 	}
10982 
10983 	if (rval != QL_SUCCESS) {
10984 		EL(ha, "failed=%xh\n", rval);
10985 	} else {
10986 		/*EMPTY*/
10987 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10988 	}
10989 	return (rval);
10990 }
10991 
10992 /*
10993  * ql_poll_flash
10994  *	Polls flash for completion.
10995  *
10996  * Input:
10997  *	ha = adapter state pointer.
10998  *	addr = flash byte address.
10999  *	data = data to be polled.
11000  *
11001  * Returns:
11002  *	ql local function return status code.
11003  *
11004  * Context:
11005  *	Kernel context.
11006  */
11007 int
11008 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
11009 {
11010 	uint8_t		flash_data;
11011 	uint32_t	cnt;
11012 	int		rval = QL_FUNCTION_FAILED;
11013 
11014 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11015 
11016 	poll_data = (uint8_t)(poll_data & BIT_7);
11017 
11018 	/* Wait for 30 seconds for command to finish. */
11019 	for (cnt = 30000000; cnt; cnt--) {
11020 		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
11021 
11022 		if ((flash_data & BIT_7) == poll_data) {
11023 			rval = QL_SUCCESS;
11024 			break;
11025 		}
11026 		if (flash_data & BIT_5 && cnt > 2) {
11027 			cnt = 2;
11028 		}
11029 		drv_usecwait(1);
11030 	}
11031 
11032 	if (rval != QL_SUCCESS) {
11033 		EL(ha, "failed=%xh\n", rval);
11034 	} else {
11035 		/*EMPTY*/
11036 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11037 	}
11038 	return (rval);
11039 }
11040 
11041 /*
11042  * ql_flash_enable
11043  *	Setup flash for reading/writing.
11044  *
11045  * Input:
11046  *	ha = adapter state pointer.
11047  *
11048  * Context:
11049  *	Kernel context.
11050  */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* Set the write-enable bit in the FPGA config register. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		/* Set the flash-enable bit in the ISP control/status reg. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/* Dummy read to complete the reset sequence. */
	(void) ql_read_flash_byte(ha, 0);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11084 
11085 /*
11086  * ql_flash_disable
11087  *	Disable flash and allow RISC to run.
11088  *
11089  * Input:
11090  *	ha = adapter state pointer.
11091  *
11092  * Context:
11093  *	Kernel context.
11094  */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		/* Clear the write-enable bit in the FPGA config register. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* Clear the flash-enable bit; RISC may run again. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11122 
11123 /*
11124  * ql_write_flash_byte
11125  *	Write byte to flash.
11126  *
11127  * Input:
11128  *	ha = adapter state pointer.
11129  *	addr = flash byte address.
11130  *	data = data to be written.
11131  *
11132  * Context:
11133  *	Kernel context.
11134  */
void
ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS: address and data go through the FPGA registers. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
		    (uint16_t)data);
	} else {
		uint16_t bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);

		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322: bank bits occupy ctrl_status[7:4]. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it must change. */
			if (addr & BIT_16 && !(bank_select &
			    ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) && bank_select &
			    ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this CFG_SBUS_CARD test can never be true
		 * here (we are already in the !CFG_SBUS_CARD branch), so
		 * the first arm looks like dead code -- confirm before
		 * removing.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			WRT16_IOMAP_REG(ha, flash_data, data);
		}
	}
}
11182 
11183 /*
11184  * ql_read_flash_byte
11185  *	Reads byte from flash, but must read a word from chip.
11186  *
11187  * Input:
11188  *	ha = adapter state pointer.
11189  *	addr = flash byte address.
11190  *
11191  * Returns:
11192  *	byte from flash.
11193  *
11194  * Context:
11195  *	Kernel context.
11196  */
uint8_t
ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
{
	uint8_t	data;

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS: address and data go through the FPGA registers. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
	} else {
		uint16_t	bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = RD16_IO_REG(ha, ctrl_status);
		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322: bank bits occupy ctrl_status[7:4]. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it must change. */
			if (addr & BIT_16 &&
			    !(bank_select & ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) &&
			    bank_select & ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this CFG_SBUS_CARD test can never be true
		 * here (we are already in the !CFG_SBUS_CARD branch), so
		 * the first arm looks like dead code -- confirm before
		 * removing.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IO_REG(ha, flash_data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
		}
	}

	return (data);
}
11246 
11247 /*
11248  * ql_24xx_flash_id
11249  *	Get flash IDs.
11250  *
11251  * Input:
11252  *	ha:		adapter state pointer.
11253  *
11254  * Returns:
11255  *	ql local function return status code.
11256  *
11257  * Context:
11258  *	Kernel context.
11259  */
11260 int
11261 ql_24xx_flash_id(ql_adapter_state_t *vha)
11262 {
11263 	int			rval;
11264 	uint32_t		fdata = 0;
11265 	ql_adapter_state_t	*ha = vha->pha;
11266 	ql_xioctl_t		*xp = ha->xioctl;
11267 
11268 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11269 
11270 	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11271 
11272 	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11273 		fdata = 0;
11274 		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11275 		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11276 	}
11277 
11278 	if (rval != QL_SUCCESS) {
11279 		EL(ha, "24xx read_flash failed=%xh\n", rval);
11280 	} else if (fdata != 0) {
11281 		xp->fdesc.flash_manuf = LSB(LSW(fdata));
11282 		xp->fdesc.flash_id = MSB(LSW(fdata));
11283 		xp->fdesc.flash_len = LSB(MSW(fdata));
11284 	} else {
11285 		xp->fdesc.flash_manuf = ATMEL_FLASH;
11286 		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11287 		xp->fdesc.flash_len = 0;
11288 	}
11289 
11290 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11291 
11292 	return (rval);
11293 }
11294 
11295 /*
11296  * ql_24xx_load_flash
11297  *	Loads flash.
11298  *
11299  * Input:
11300  *	ha = adapter state pointer.
11301  *	dp = data pointer.
11302  *	size = data length in bytes.
11303  *	faddr = 32bit word flash byte address.
11304  *
11305  * Returns:
11306  *	ql local function return status code.
11307  *
11308  * Context:
11309  *	Kernel context.
11310  */
int
ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
    uint32_t faddr)
{
	int			rval;
	uint32_t		cnt, rest_addr, fdata, wc;
	dma_mem_t		dmabuf = {0};
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
	    ha->instance, faddr, size);

	/* start address must be 32 bit word aligned */
	if ((faddr & 0x3) != 0) {
		EL(ha, "incorrect buffer size alignment\n");
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	/* Allocate DMA buffer */
	/* 25xx/81xx can burst-write via RISC RAM DMA; others go word-by-word */
	if (CFG_IST(ha, CFG_CTRL_2581)) {
		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
		    QL_SUCCESS) {
			EL(ha, "dma alloc failed, rval=%xh\n", rval);
			return (rval);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable flash write */
	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
		GLOBAL_HW_UNLOCK();
		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
		ql_free_phys(ha, &dmabuf);
		return (rval);
	}

	/* setup mask of address range within a sector */
	rest_addr = (xp->fdesc.block_size - 1) >> 2;

	faddr = faddr >> 2;	/* flash gets 32 bit words */

	/*
	 * Write data to flash.
	 */
	cnt = 0;
	size = (size + 3) >> 2;	/* Round up & convert to dwords */

	while (cnt < size) {
		/* Beginning of a sector? */
		/* Each sector must be erased before it is programmed. */
		if ((faddr & rest_addr) == 0) {
			if (CFG_IST(ha, CFG_CTRL_8021)) {
				/* 8021: erase via the ROM interface. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_8021_rom_erase(ha, fdata);
				if (rval != QL_SUCCESS) {
					EL(ha, "8021 erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
				/* 81xx: erase via Flash Access Control. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_flash_access(ha,
				    FAC_ERASE_SECTOR, fdata, fdata +
				    rest_addr, 0);
				if (rval != QL_SUCCESS) {
					EL(ha, "erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else {
				/* Byte-swap the sector address for the part. */
				fdata = (faddr & ~rest_addr) << 2;
				fdata = (fdata & 0xff00) |
				    (fdata << 16 & 0xff0000) |
				    (fdata >> 16 & 0xff);

				if (rest_addr == 0x1fff) {
					/* 32kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x0352,
					    fdata);
				} else {
					/* 64kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x03d8,
					    fdata);
				}
				if (rval != QL_SUCCESS) {
					EL(ha, "Unable to flash sector"
					    ": address=%xh\n", faddr);
					break;
				}
			}
		}

		/* Write data */
		/* Burst path: DMA a chunk (64-dword aligned) via RISC RAM. */
		if (CFG_IST(ha, CFG_CTRL_2581) &&
		    ((faddr & 0x3f) == 0)) {
			/*
			 * Limit write up to sector boundary.
			 */
			wc = ((~faddr & (rest_addr>>1)) + 1);

			if (size - cnt < wc) {
				wc = size - cnt;
			}

			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
			    (uint8_t *)dmabuf.bp, wc<<2,
			    DDI_DEV_AUTOINCR);

			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
			    faddr, dmabuf.cookie.dmac_laddress, wc);
			if (rval != QL_SUCCESS) {
				EL(ha, "unable to dma to flash "
				    "address=%xh\n", faddr << 2);
				break;
			}

			cnt += wc;
			faddr += wc;
			dp += wc << 2;
		} else {
			/* Slow path: assemble one little-endian dword. */
			fdata = *dp++;
			fdata |= *dp++ << 8;
			fdata |= *dp++ << 16;
			fdata |= *dp++ << 24;
			rval = ql_24xx_write_flash(ha,
			    ha->flash_data_addr | faddr, fdata);
			if (rval != QL_SUCCESS) {
				EL(ha, "Unable to program flash "
				    "address=%xh data=%xh\n", faddr,
				    *dp);
				break;
			}
			cnt++;
			faddr++;

			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
		}
	}

	/* Re-protect the flash regardless of programming outcome. */
	ql_24xx_protect_flash(ha);

	ql_free_phys(ha, &dmabuf);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
11475 
11476 /*
11477  * ql_24xx_read_flash
11478  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11479  *
11480  * Input:
11481  *	ha:	adapter state pointer.
11482  *	faddr:	NVRAM/FLASH address.
11483  *	bp:	data pointer.
11484  *
11485  * Returns:
11486  *	ql local function return status code.
11487  *
11488  * Context:
11489  *	Kernel context.
11490  */
int
ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
{
	uint32_t		timer;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* 8021 parts read through the ROM interface instead. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Start the read cycle (data flag clear = read request). */
	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);

	/* Wait for READ cycle to complete. */
	/* Up to 300000 * 10us = 3 seconds. */
	for (timer = 300000; timer; timer--) {
		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
			break;
		}
		drv_usecwait(10);
	}

	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "failed, access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	/* Data register is read back unconditionally; valid on success. */
	*bp = RD32_IO_REG(ha, flash_data);

	return (rval);
}
11531 
11532 /*
11533  * ql_24xx_write_flash
11534  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11535  *
11536  * Input:
11537  *	ha:	adapter state pointer.
11538  *	addr:	NVRAM/FLASH address.
11539  *	value:	data.
11540  *
11541  * Returns:
11542  *	ql local function return status code.
11543  *
11544  * Context:
11545  *	Kernel context.
11546  */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* 8021 parts write through the ROM interface instead. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Latch the data first, then trigger the write cycle. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	/* Up to 3000000 * 10us = 30 seconds. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				/* Poll the status register's busy bit. */
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x005, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11594 /*
11595  * ql_24xx_unprotect_flash
11596  *	Enable writes
11597  *
11598  * Input:
11599  *	ha:	adapter state pointer.
11600  *
11601  * Returns:
11602  *	ql local function return status code.
11603  *
11604  * Context:
11605  *	Kernel context.
11606  */
int
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/*
		 * The status register is written twice and only the second
		 * result is checked -- presumably the first write primes
		 * the part (mirrors ql_24xx_protect_flash); TODO confirm.
		 */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* With firmware up, use the Flash Access Control mailbox. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return (rval);
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Remove block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 * Unprotect sectors.
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		/* Unprotect the remaining high sectors. */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* Results of the individual unprotect writes are not checked. */
	return (QL_SUCCESS);
}
11668 
11669 /*
11670  * ql_24xx_protect_flash
11671  *	Disable writes
11672  *
11673  * Input:
11674  *	ha:	adapter state pointer.
11675  *
11676  * Context:
11677  *	Kernel context.
11678  */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/* Prime with enable bits, then write the disable bits. */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return;
	}
	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* With firmware up, use the Flash Access Control mailbox. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return;
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}
		/* Protect the remaining high sectors. */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/* TODO: ??? */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		/* No per-sector protection; lock via status register only. */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write. */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11748 
11749 /*
11750  * ql_dump_firmware
11751  *	Save RISC code state information.
11752  *
11753  * Input:
11754  *	ha = adapter state pointer.
11755  *
11756  * Returns:
11757  *	QL local function return status code.
11758  *
11759  * Context:
11760  *	Kernel context.
11761  */
11762 static int
11763 ql_dump_firmware(ql_adapter_state_t *vha)
11764 {
11765 	int			rval;
11766 	clock_t			timer = drv_usectohz(30000000);
11767 	ql_adapter_state_t	*ha = vha->pha;
11768 
11769 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11770 
11771 	QL_DUMP_LOCK(ha);
11772 
11773 	if (ha->ql_dump_state & QL_DUMPING ||
11774 	    (ha->ql_dump_state & QL_DUMP_VALID &&
11775 	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11776 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11777 		QL_DUMP_UNLOCK(ha);
11778 		return (QL_SUCCESS);
11779 	}
11780 
11781 	QL_DUMP_UNLOCK(ha);
11782 
11783 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11784 
11785 	/*
11786 	 * Wait for all outstanding commands to complete
11787 	 */
11788 	(void) ql_wait_outstanding(ha);
11789 
11790 	/* Dump firmware. */
11791 	rval = ql_binary_fw_dump(ha, TRUE);
11792 
11793 	/* Do abort to force restart. */
11794 	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11795 	EL(ha, "restarting, isp_abort_needed\n");
11796 
11797 	/* Acquire task daemon lock. */
11798 	TASK_DAEMON_LOCK(ha);
11799 
11800 	/* Wait for suspension to end. */
11801 	while (ha->task_daemon_flags & QL_SUSPENDED) {
11802 		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11803 
11804 		/* 30 seconds from now */
11805 		if (cv_reltimedwait(&ha->cv_dr_suspended,
11806 		    &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
11807 			/*
11808 			 * The timeout time 'timer' was
11809 			 * reached without the condition
11810 			 * being signaled.
11811 			 */
11812 			break;
11813 		}
11814 	}
11815 
11816 	/* Release task daemon lock. */
11817 	TASK_DAEMON_UNLOCK(ha);
11818 
11819 	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11820 		/*EMPTY*/
11821 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11822 	} else {
11823 		EL(ha, "failed, rval = %xh\n", rval);
11824 	}
11825 	return (rval);
11826 }
11827 
11828 /*
11829  * ql_binary_fw_dump
11830  *	Dumps binary data from firmware.
11831  *
11832  * Input:
11833  *	ha = adapter state pointer.
11834  *	lock_needed = mailbox lock needed.
11835  *
11836  * Returns:
11837  *	ql local function return status code.
11838  *
11839  * Context:
11840  *	Interrupt or Kernel context, no mailbox commands allowed.
11841  */
11842 int
11843 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11844 {
11845 	clock_t			timer;
11846 	mbx_cmd_t		mc;
11847 	mbx_cmd_t		*mcp = &mc;
11848 	int			rval = QL_SUCCESS;
11849 	ql_adapter_state_t	*ha = vha->pha;
11850 
11851 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11852 
11853 	if (CFG_IST(ha, CFG_CTRL_8021)) {
11854 		EL(ha, "8021 not supported\n");
11855 		return (QL_NOT_SUPPORTED);
11856 	}
11857 
11858 	QL_DUMP_LOCK(ha);
11859 
11860 	if (ha->ql_dump_state & QL_DUMPING ||
11861 	    (ha->ql_dump_state & QL_DUMP_VALID &&
11862 	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11863 		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11864 		QL_DUMP_UNLOCK(ha);
11865 		return (QL_DATA_EXISTS);
11866 	}
11867 
11868 	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11869 	ha->ql_dump_state |= QL_DUMPING;
11870 
11871 	QL_DUMP_UNLOCK(ha);
11872 
11873 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11874 
11875 		/* Insert Time Stamp */
11876 		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11877 		    FTO_INSERT_TIME_STAMP);
11878 		if (rval != QL_SUCCESS) {
11879 			EL(ha, "f/w extended trace insert"
11880 			    "time stamp failed: %xh\n", rval);
11881 		}
11882 	}
11883 
11884 	if (lock_needed == TRUE) {
11885 		/* Acquire mailbox register lock. */
11886 		MBX_REGISTER_LOCK(ha);
11887 		timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
11888 
11889 		/* Check for mailbox available, if not wait for signal. */
11890 		while (ha->mailbox_flags & MBX_BUSY_FLG) {
11891 			ha->mailbox_flags = (uint8_t)
11892 			    (ha->mailbox_flags | MBX_WANT_FLG);
11893 
11894 			/* 30 seconds from now */
11895 			if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11896 			    timer, TR_CLOCK_TICK) == -1) {
11897 				/*
11898 				 * The timeout time 'timer' was
11899 				 * reached without the condition
11900 				 * being signaled.
11901 				 */
11902 
11903 				/* Release mailbox register lock. */
11904 				MBX_REGISTER_UNLOCK(ha);
11905 
11906 				EL(ha, "failed, rval = %xh\n",
11907 				    QL_FUNCTION_TIMEOUT);
11908 				return (QL_FUNCTION_TIMEOUT);
11909 			}
11910 		}
11911 
11912 		/* Set busy flag. */
11913 		ha->mailbox_flags = (uint8_t)
11914 		    (ha->mailbox_flags | MBX_BUSY_FLG);
11915 		mcp->timeout = 120;
11916 		ha->mcp = mcp;
11917 
11918 		/* Release mailbox register lock. */
11919 		MBX_REGISTER_UNLOCK(ha);
11920 	}
11921 
11922 	/* Free previous dump buffer. */
11923 	if (ha->ql_dump_ptr != NULL) {
11924 		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11925 		ha->ql_dump_ptr = NULL;
11926 	}
11927 
11928 	if (CFG_IST(ha, CFG_CTRL_2422)) {
11929 		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11930 		    ha->fw_ext_memory_size);
11931 	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11932 		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11933 		    ha->fw_ext_memory_size);
11934 	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11935 		ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
11936 		    ha->fw_ext_memory_size);
11937 	} else {
11938 		ha->ql_dump_size = sizeof (ql_fw_dump_t);
11939 	}
11940 
11941 	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11942 	    NULL) {
11943 		rval = QL_MEMORY_ALLOC_FAILED;
11944 	} else {
11945 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11946 			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11947 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11948 			rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11949 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11950 			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11951 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
11952 			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11953 		} else {
11954 			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11955 		}
11956 	}
11957 
11958 	/* Reset ISP chip. */
11959 	ql_reset_chip(ha);
11960 
11961 	QL_DUMP_LOCK(ha);
11962 
11963 	if (rval != QL_SUCCESS) {
11964 		if (ha->ql_dump_ptr != NULL) {
11965 			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11966 			ha->ql_dump_ptr = NULL;
11967 		}
11968 		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11969 		    QL_DUMP_UPLOADED);
11970 		EL(ha, "failed, rval = %xh\n", rval);
11971 	} else {
11972 		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11973 		ha->ql_dump_state |= QL_DUMP_VALID;
11974 		EL(ha, "done\n");
11975 	}
11976 
11977 	QL_DUMP_UNLOCK(ha);
11978 
11979 	return (rval);
11980 }
11981 
11982 /*
11983  * ql_ascii_fw_dump
11984  *	Converts firmware binary dump to ascii.
11985  *
11986  * Input:
11987  *	ha = adapter state pointer.
11988  *	bptr = buffer pointer.
11989  *
11990  * Returns:
11991  *	Amount of data buffer used.
11992  *
11993  * Context:
11994  *	Kernel context.
11995  */
11996 size_t
11997 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11998 {
11999 	uint32_t		cnt;
12000 	caddr_t			bp;
12001 	int			mbox_cnt;
12002 	ql_adapter_state_t	*ha = vha->pha;
12003 	ql_fw_dump_t		*fw = ha->ql_dump_ptr;
12004 
12005 	if (CFG_IST(ha, CFG_CTRL_2422)) {
12006 		return (ql_24xx_ascii_fw_dump(ha, bufp));
12007 	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
12008 		return (ql_2581_ascii_fw_dump(ha, bufp));
12009 	}
12010 
12011 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12012 
12013 	if (CFG_IST(ha, CFG_CTRL_2300)) {
12014 		(void) sprintf(bufp, "\nISP 2300IP ");
12015 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
12016 		(void) sprintf(bufp, "\nISP 6322FLX ");
12017 	} else {
12018 		(void) sprintf(bufp, "\nISP 2200IP ");
12019 	}
12020 
12021 	bp = bufp + strlen(bufp);
12022 	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
12023 	    ha->fw_major_version, ha->fw_minor_version,
12024 	    ha->fw_subminor_version);
12025 
12026 	(void) strcat(bufp, "\nPBIU Registers:");
12027 	bp = bufp + strlen(bufp);
12028 	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
12029 		if (cnt % 8 == 0) {
12030 			*bp++ = '\n';
12031 		}
12032 		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
12033 		bp = bp + 6;
12034 	}
12035 
12036 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12037 		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
12038 		    "registers:");
12039 		bp = bufp + strlen(bufp);
12040 		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
12041 			if (cnt % 8 == 0) {
12042 				*bp++ = '\n';
12043 			}
12044 			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
12045 			bp = bp + 6;
12046 		}
12047 	}
12048 
12049 	(void) strcat(bp, "\n\nMailbox Registers:");
12050 	bp = bufp + strlen(bufp);
12051 	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
12052 	for (cnt = 0; cnt < mbox_cnt; cnt++) {
12053 		if (cnt % 8 == 0) {
12054 			*bp++ = '\n';
12055 		}
12056 		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
12057 		bp = bp + 6;
12058 	}
12059 
12060 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12061 		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
12062 		bp = bufp + strlen(bufp);
12063 		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12064 			if (cnt % 8 == 0) {
12065 				*bp++ = '\n';
12066 			}
12067 			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
12068 			bp = bp + 6;
12069 		}
12070 	}
12071 
12072 	(void) strcat(bp, "\n\nDMA Registers:");
12073 	bp = bufp + strlen(bufp);
12074 	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12075 		if (cnt % 8 == 0) {
12076 			*bp++ = '\n';
12077 		}
12078 		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
12079 		bp = bp + 6;
12080 	}
12081 
12082 	(void) strcat(bp, "\n\nRISC Hardware Registers:");
12083 	bp = bufp + strlen(bufp);
12084 	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
12085 		if (cnt % 8 == 0) {
12086 			*bp++ = '\n';
12087 		}
12088 		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
12089 		bp = bp + 6;
12090 	}
12091 
12092 	(void) strcat(bp, "\n\nRISC GP0 Registers:");
12093 	bp = bufp + strlen(bufp);
12094 	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
12095 		if (cnt % 8 == 0) {
12096 			*bp++ = '\n';
12097 		}
12098 		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
12099 		bp = bp + 6;
12100 	}
12101 
12102 	(void) strcat(bp, "\n\nRISC GP1 Registers:");
12103 	bp = bufp + strlen(bufp);
12104 	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
12105 		if (cnt % 8 == 0) {
12106 			*bp++ = '\n';
12107 		}
12108 		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
12109 		bp = bp + 6;
12110 	}
12111 
12112 	(void) strcat(bp, "\n\nRISC GP2 Registers:");
12113 	bp = bufp + strlen(bufp);
12114 	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
12115 		if (cnt % 8 == 0) {
12116 			*bp++ = '\n';
12117 		}
12118 		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
12119 		bp = bp + 6;
12120 	}
12121 
12122 	(void) strcat(bp, "\n\nRISC GP3 Registers:");
12123 	bp = bufp + strlen(bufp);
12124 	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
12125 		if (cnt % 8 == 0) {
12126 			*bp++ = '\n';
12127 		}
12128 		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
12129 		bp = bp + 6;
12130 	}
12131 
12132 	(void) strcat(bp, "\n\nRISC GP4 Registers:");
12133 	bp = bufp + strlen(bufp);
12134 	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
12135 		if (cnt % 8 == 0) {
12136 			*bp++ = '\n';
12137 		}
12138 		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
12139 		bp = bp + 6;
12140 	}
12141 
12142 	(void) strcat(bp, "\n\nRISC GP5 Registers:");
12143 	bp = bufp + strlen(bufp);
12144 	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
12145 		if (cnt % 8 == 0) {
12146 			*bp++ = '\n';
12147 		}
12148 		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
12149 		bp = bp + 6;
12150 	}
12151 
12152 	(void) strcat(bp, "\n\nRISC GP6 Registers:");
12153 	bp = bufp + strlen(bufp);
12154 	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
12155 		if (cnt % 8 == 0) {
12156 			*bp++ = '\n';
12157 		}
12158 		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
12159 		bp = bp + 6;
12160 	}
12161 
12162 	(void) strcat(bp, "\n\nRISC GP7 Registers:");
12163 	bp = bufp + strlen(bufp);
12164 	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
12165 		if (cnt % 8 == 0) {
12166 			*bp++ = '\n';
12167 		}
12168 		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
12169 		bp = bp + 6;
12170 	}
12171 
12172 	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12173 	bp = bufp + strlen(bufp);
12174 	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12175 		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
12176 		    CFG_CTRL_6322)) == 0))) {
12177 			break;
12178 		}
12179 		if (cnt % 8 == 0) {
12180 			*bp++ = '\n';
12181 		}
12182 		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
12183 		bp = bp + 6;
12184 	}
12185 
12186 	(void) strcat(bp, "\n\nFPM B0 Registers:");
12187 	bp = bufp + strlen(bufp);
12188 	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12189 		if (cnt % 8 == 0) {
12190 			*bp++ = '\n';
12191 		}
12192 		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
12193 		bp = bp + 6;
12194 	}
12195 
12196 	(void) strcat(bp, "\n\nFPM B1 Registers:");
12197 	bp = bufp + strlen(bufp);
12198 	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12199 		if (cnt % 8 == 0) {
12200 			*bp++ = '\n';
12201 		}
12202 		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
12203 		bp = bp + 6;
12204 	}
12205 
12206 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12207 		(void) strcat(bp, "\n\nCode RAM Dump:");
12208 		bp = bufp + strlen(bufp);
12209 		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12210 			if (cnt % 8 == 0) {
12211 				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12212 				bp = bp + 8;
12213 			}
12214 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12215 			bp = bp + 6;
12216 		}
12217 
12218 		(void) strcat(bp, "\n\nStack RAM Dump:");
12219 		bp = bufp + strlen(bufp);
12220 		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12221 			if (cnt % 8 == 0) {
12222 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12223 				bp = bp + 8;
12224 			}
12225 			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
12226 			bp = bp + 6;
12227 		}
12228 
12229 		(void) strcat(bp, "\n\nData RAM Dump:");
12230 		bp = bufp + strlen(bufp);
12231 		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
12232 			if (cnt % 8 == 0) {
12233 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
12234 				bp = bp + 8;
12235 			}
12236 			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
12237 			bp = bp + 6;
12238 		}
12239 	} else {
12240 		(void) strcat(bp, "\n\nRISC SRAM:");
12241 		bp = bufp + strlen(bufp);
12242 		for (cnt = 0; cnt < 0xf000; cnt++) {
12243 			if (cnt % 8 == 0) {
12244 				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
12245 				bp = bp + 7;
12246 			}
12247 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12248 			bp = bp + 6;
12249 		}
12250 	}
12251 
12252 	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
12253 	bp += strlen(bp);
12254 
12255 	(void) sprintf(bp, "\n\nRequest Queue");
12256 	bp += strlen(bp);
12257 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12258 		if (cnt % 8 == 0) {
12259 			(void) sprintf(bp, "\n%08x: ", cnt);
12260 			bp += strlen(bp);
12261 		}
12262 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12263 		bp += strlen(bp);
12264 	}
12265 
12266 	(void) sprintf(bp, "\n\nResponse Queue");
12267 	bp += strlen(bp);
12268 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12269 		if (cnt % 8 == 0) {
12270 			(void) sprintf(bp, "\n%08x: ", cnt);
12271 			bp += strlen(bp);
12272 		}
12273 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12274 		bp += strlen(bp);
12275 	}
12276 
12277 	(void) sprintf(bp, "\n");
12278 
12279 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12280 
12281 	return (strlen(bufp));
12282 }
12283 
12284 /*
12285  * ql_24xx_ascii_fw_dump
12286  *	Converts ISP24xx firmware binary dump to ascii.
12287  *
12288  * Input:
12289  *	ha = adapter state pointer.
12290  *	bptr = buffer pointer.
12291  *
12292  * Returns:
12293  *	Amount of data buffer used.
12294  *
12295  * Context:
12296  *	Kernel context.
12297  */
12298 static size_t
12299 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12300 {
12301 	uint32_t		cnt;
12302 	caddr_t			bp = bufp;
12303 	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12304 
12305 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12306 
12307 	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12308 	    ha->fw_major_version, ha->fw_minor_version,
12309 	    ha->fw_subminor_version, ha->fw_attributes);
12310 	bp += strlen(bp);
12311 
12312 	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12313 
12314 	(void) strcat(bp, "\nHost Interface Registers");
12315 	bp += strlen(bp);
12316 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12317 		if (cnt % 8 == 0) {
12318 			(void) sprintf(bp++, "\n");
12319 		}
12320 
12321 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12322 		bp += 9;
12323 	}
12324 
12325 	(void) sprintf(bp, "\n\nMailbox Registers");
12326 	bp += strlen(bp);
12327 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12328 		if (cnt % 16 == 0) {
12329 			(void) sprintf(bp++, "\n");
12330 		}
12331 
12332 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12333 		bp += 5;
12334 	}
12335 
12336 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12337 	bp += strlen(bp);
12338 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12339 		if (cnt % 8 == 0) {
12340 			(void) sprintf(bp++, "\n");
12341 		}
12342 
12343 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12344 		bp += 9;
12345 	}
12346 
12347 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12348 	bp += strlen(bp);
12349 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12350 		if (cnt % 8 == 0) {
12351 			(void) sprintf(bp++, "\n");
12352 		}
12353 
12354 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12355 		bp += 9;
12356 	}
12357 
12358 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12359 	bp += strlen(bp);
12360 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12361 		if (cnt % 8 == 0) {
12362 			(void) sprintf(bp++, "\n");
12363 		}
12364 
12365 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12366 		bp += 9;
12367 	}
12368 
12369 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12370 	bp += strlen(bp);
12371 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12372 		if (cnt % 8 == 0) {
12373 			(void) sprintf(bp++, "\n");
12374 		}
12375 
12376 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12377 		bp += 9;
12378 	}
12379 
12380 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12381 	bp += strlen(bp);
12382 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12383 		if (cnt % 8 == 0) {
12384 			(void) sprintf(bp++, "\n");
12385 		}
12386 
12387 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12388 		bp += 9;
12389 	}
12390 
12391 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12392 	bp += strlen(bp);
12393 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12394 		if (cnt % 8 == 0) {
12395 			(void) sprintf(bp++, "\n");
12396 		}
12397 
12398 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12399 		bp += 9;
12400 	}
12401 
12402 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12403 	bp += strlen(bp);
12404 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12405 		if (cnt % 8 == 0) {
12406 			(void) sprintf(bp++, "\n");
12407 		}
12408 
12409 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12410 		bp += 9;
12411 	}
12412 
12413 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12414 	bp += strlen(bp);
12415 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12416 		if (cnt % 8 == 0) {
12417 			(void) sprintf(bp++, "\n");
12418 		}
12419 
12420 		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12421 		bp += 9;
12422 	}
12423 
12424 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12425 	bp += strlen(bp);
12426 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12427 		if (cnt % 8 == 0) {
12428 			(void) sprintf(bp++, "\n");
12429 		}
12430 
12431 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12432 		bp += 9;
12433 	}
12434 
12435 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12436 	bp += strlen(bp);
12437 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12438 		if (cnt % 8 == 0) {
12439 			(void) sprintf(bp++, "\n");
12440 		}
12441 
12442 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12443 		bp += 9;
12444 	}
12445 
12446 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12447 	bp += strlen(bp);
12448 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12449 		if (cnt % 8 == 0) {
12450 			(void) sprintf(bp++, "\n");
12451 		}
12452 
12453 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12454 		bp += 9;
12455 	}
12456 
12457 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12458 	bp += strlen(bp);
12459 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12460 		if (cnt % 8 == 0) {
12461 			(void) sprintf(bp++, "\n");
12462 		}
12463 
12464 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12465 		bp += 9;
12466 	}
12467 
12468 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12469 	bp += strlen(bp);
12470 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12471 		if (cnt % 8 == 0) {
12472 			(void) sprintf(bp++, "\n");
12473 		}
12474 
12475 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12476 		bp += 9;
12477 	}
12478 
12479 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12480 	bp += strlen(bp);
12481 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12482 		if (cnt % 8 == 0) {
12483 			(void) sprintf(bp++, "\n");
12484 		}
12485 
12486 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12487 		bp += 9;
12488 	}
12489 
12490 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12491 	bp += strlen(bp);
12492 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12493 		if (cnt % 8 == 0) {
12494 			(void) sprintf(bp++, "\n");
12495 		}
12496 
12497 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12498 		bp += 9;
12499 	}
12500 
12501 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12502 	bp += strlen(bp);
12503 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12504 		if (cnt % 8 == 0) {
12505 			(void) sprintf(bp++, "\n");
12506 		}
12507 
12508 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12509 		bp += 9;
12510 	}
12511 
12512 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12513 	bp += strlen(bp);
12514 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12515 		if (cnt % 8 == 0) {
12516 			(void) sprintf(bp++, "\n");
12517 		}
12518 
12519 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12520 		bp += 9;
12521 	}
12522 
12523 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12524 	bp += strlen(bp);
12525 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12526 		if (cnt % 8 == 0) {
12527 			(void) sprintf(bp++, "\n");
12528 		}
12529 
12530 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12531 		bp += 9;
12532 	}
12533 
12534 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12535 	bp += strlen(bp);
12536 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12537 		if (cnt % 8 == 0) {
12538 			(void) sprintf(bp++, "\n");
12539 		}
12540 
12541 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12542 		bp += 9;
12543 	}
12544 
12545 	(void) sprintf(bp, "\n\nRISC GP Registers");
12546 	bp += strlen(bp);
12547 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12548 		if (cnt % 8 == 0) {
12549 			(void) sprintf(bp++, "\n");
12550 		}
12551 
12552 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12553 		bp += 9;
12554 	}
12555 
12556 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12557 	bp += strlen(bp);
12558 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12559 		if (cnt % 8 == 0) {
12560 			(void) sprintf(bp++, "\n");
12561 		}
12562 
12563 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12564 		bp += 9;
12565 	}
12566 
12567 	(void) sprintf(bp, "\n\nLMC Registers");
12568 	bp += strlen(bp);
12569 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12570 		if (cnt % 8 == 0) {
12571 			(void) sprintf(bp++, "\n");
12572 		}
12573 
12574 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12575 		bp += 9;
12576 	}
12577 
12578 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12579 	bp += strlen(bp);
12580 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12581 		if (cnt % 8 == 0) {
12582 			(void) sprintf(bp++, "\n");
12583 		}
12584 
12585 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12586 		bp += 9;
12587 	}
12588 
12589 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12590 	bp += strlen(bp);
12591 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12592 		if (cnt % 8 == 0) {
12593 			(void) sprintf(bp++, "\n");
12594 		}
12595 
12596 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12597 		bp += 9;
12598 	}
12599 
12600 	(void) sprintf(bp, "\n\nCode RAM");
12601 	bp += strlen(bp);
12602 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12603 		if (cnt % 8 == 0) {
12604 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12605 			bp += 11;
12606 		}
12607 
12608 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12609 		bp += 9;
12610 	}
12611 
12612 	(void) sprintf(bp, "\n\nExternal Memory");
12613 	bp += strlen(bp);
12614 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12615 		if (cnt % 8 == 0) {
12616 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12617 			bp += 11;
12618 		}
12619 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12620 		bp += 9;
12621 	}
12622 
12623 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12624 	bp += strlen(bp);
12625 
12626 	(void) sprintf(bp, "\n\nRequest Queue");
12627 	bp += strlen(bp);
12628 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12629 		if (cnt % 8 == 0) {
12630 			(void) sprintf(bp, "\n%08x: ", cnt);
12631 			bp += strlen(bp);
12632 		}
12633 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12634 		bp += strlen(bp);
12635 	}
12636 
12637 	(void) sprintf(bp, "\n\nResponse Queue");
12638 	bp += strlen(bp);
12639 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12640 		if (cnt % 8 == 0) {
12641 			(void) sprintf(bp, "\n%08x: ", cnt);
12642 			bp += strlen(bp);
12643 		}
12644 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12645 		bp += strlen(bp);
12646 	}
12647 
12648 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12649 	    (ha->fwexttracebuf.bp != NULL)) {
12650 		uint32_t cnt_b = 0;
12651 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12652 
12653 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12654 		bp += strlen(bp);
12655 		/* show data address as a byte address, data as long words */
12656 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12657 			cnt_b = cnt * 4;
12658 			if (cnt_b % 32 == 0) {
12659 				(void) sprintf(bp, "\n%08x: ",
12660 				    (int)(w64 + cnt_b));
12661 				bp += 11;
12662 			}
12663 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12664 			bp += 9;
12665 		}
12666 	}
12667 
12668 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12669 	    (ha->fwfcetracebuf.bp != NULL)) {
12670 		uint32_t cnt_b = 0;
12671 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12672 
12673 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12674 		bp += strlen(bp);
12675 		/* show data address as a byte address, data as long words */
12676 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12677 			cnt_b = cnt * 4;
12678 			if (cnt_b % 32 == 0) {
12679 				(void) sprintf(bp, "\n%08x: ",
12680 				    (int)(w64 + cnt_b));
12681 				bp += 11;
12682 			}
12683 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12684 			bp += 9;
12685 		}
12686 	}
12687 
12688 	(void) sprintf(bp, "\n\n");
12689 	bp += strlen(bp);
12690 
12691 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12692 
12693 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12694 
12695 	return (cnt);
12696 }
12697 
12698 /*
12699  * ql_2581_ascii_fw_dump
12700  *	Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12701  *
12702  * Input:
12703  *	ha = adapter state pointer.
12704  *	bptr = buffer pointer.
12705  *
12706  * Returns:
12707  *	Amount of data buffer used.
12708  *
12709  * Context:
12710  *	Kernel context.
12711  */
12712 static size_t
12713 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12714 {
12715 	uint32_t		cnt;
12716 	uint32_t		cnt1;
12717 	caddr_t			bp = bufp;
12718 	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12719 
12720 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12721 
12722 	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12723 	    ha->fw_major_version, ha->fw_minor_version,
12724 	    ha->fw_subminor_version, ha->fw_attributes);
12725 	bp += strlen(bp);
12726 
12727 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12728 	bp += strlen(bp);
12729 
12730 	(void) sprintf(bp, "\nHostRisc Registers");
12731 	bp += strlen(bp);
12732 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12733 		if (cnt % 8 == 0) {
12734 			(void) sprintf(bp++, "\n");
12735 		}
12736 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12737 		bp += 9;
12738 	}
12739 
12740 	(void) sprintf(bp, "\n\nPCIe Registers");
12741 	bp += strlen(bp);
12742 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12743 		if (cnt % 8 == 0) {
12744 			(void) sprintf(bp++, "\n");
12745 		}
12746 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12747 		bp += 9;
12748 	}
12749 
12750 	(void) strcat(bp, "\n\nHost Interface Registers");
12751 	bp += strlen(bp);
12752 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12753 		if (cnt % 8 == 0) {
12754 			(void) sprintf(bp++, "\n");
12755 		}
12756 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12757 		bp += 9;
12758 	}
12759 
12760 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12761 	bp += strlen(bp);
12762 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12763 		if (cnt % 8 == 0) {
12764 			(void) sprintf(bp++, "\n");
12765 		}
12766 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12767 		bp += 9;
12768 	}
12769 
12770 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12771 	    fw->risc_io);
12772 	bp += strlen(bp);
12773 
12774 	(void) sprintf(bp, "\n\nMailbox Registers");
12775 	bp += strlen(bp);
12776 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12777 		if (cnt % 16 == 0) {
12778 			(void) sprintf(bp++, "\n");
12779 		}
12780 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12781 		bp += 5;
12782 	}
12783 
12784 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12785 	bp += strlen(bp);
12786 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12787 		if (cnt % 8 == 0) {
12788 			(void) sprintf(bp++, "\n");
12789 		}
12790 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12791 		bp += 9;
12792 	}
12793 
12794 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12795 	bp += strlen(bp);
12796 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12797 		if (cnt % 8 == 0) {
12798 			(void) sprintf(bp++, "\n");
12799 		}
12800 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12801 		bp += 9;
12802 	}
12803 
12804 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12805 	bp += strlen(bp);
12806 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12807 		if (cnt % 8 == 0) {
12808 			(void) sprintf(bp++, "\n");
12809 		}
12810 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12811 		bp += 9;
12812 	}
12813 
12814 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12815 	bp += strlen(bp);
12816 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12817 		if (cnt % 8 == 0) {
12818 			(void) sprintf(bp++, "\n");
12819 		}
12820 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12821 		bp += 9;
12822 	}
12823 
12824 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12825 	bp += strlen(bp);
12826 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12827 		if (cnt % 8 == 0) {
12828 			(void) sprintf(bp++, "\n");
12829 		}
12830 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12831 		bp += 9;
12832 	}
12833 
12834 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12835 	bp += strlen(bp);
12836 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12837 		if (cnt % 8 == 0) {
12838 			(void) sprintf(bp++, "\n");
12839 		}
12840 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12841 		bp += 9;
12842 	}
12843 
12844 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12845 	bp += strlen(bp);
12846 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12847 		if (cnt % 8 == 0) {
12848 			(void) sprintf(bp++, "\n");
12849 		}
12850 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12851 		bp += 9;
12852 	}
12853 
12854 	(void) sprintf(bp, "\n\nASEQ GP Registers");
12855 	bp += strlen(bp);
12856 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12857 		if (cnt % 8 == 0) {
12858 			(void) sprintf(bp++, "\n");
12859 		}
12860 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12861 		bp += 9;
12862 	}
12863 
12864 	(void) sprintf(bp, "\n\nASEQ-0 Registers");
12865 	bp += strlen(bp);
12866 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12867 		if (cnt % 8 == 0) {
12868 			(void) sprintf(bp++, "\n");
12869 		}
12870 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12871 		bp += 9;
12872 	}
12873 
12874 	(void) sprintf(bp, "\n\nASEQ-1 Registers");
12875 	bp += strlen(bp);
12876 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12877 		if (cnt % 8 == 0) {
12878 			(void) sprintf(bp++, "\n");
12879 		}
12880 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12881 		bp += 9;
12882 	}
12883 
12884 	(void) sprintf(bp, "\n\nASEQ-2 Registers");
12885 	bp += strlen(bp);
12886 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12887 		if (cnt % 8 == 0) {
12888 			(void) sprintf(bp++, "\n");
12889 		}
12890 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12891 		bp += 9;
12892 	}
12893 
12894 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12895 	bp += strlen(bp);
12896 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12897 		if (cnt % 8 == 0) {
12898 			(void) sprintf(bp++, "\n");
12899 		}
12900 		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12901 		bp += 9;
12902 	}
12903 
12904 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12905 	bp += strlen(bp);
12906 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12907 		if (cnt % 8 == 0) {
12908 			(void) sprintf(bp++, "\n");
12909 		}
12910 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12911 		bp += 9;
12912 	}
12913 
12914 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12915 	bp += strlen(bp);
12916 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12917 		if (cnt % 8 == 0) {
12918 			(void) sprintf(bp++, "\n");
12919 		}
12920 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12921 		bp += 9;
12922 	}
12923 
12924 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12925 	bp += strlen(bp);
12926 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12927 		if (cnt % 8 == 0) {
12928 			(void) sprintf(bp++, "\n");
12929 		}
12930 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12931 		bp += 9;
12932 	}
12933 
12934 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12935 	bp += strlen(bp);
12936 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12937 		if (cnt % 8 == 0) {
12938 			(void) sprintf(bp++, "\n");
12939 		}
12940 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12941 		bp += 9;
12942 	}
12943 
12944 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12945 	bp += strlen(bp);
12946 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12947 		if (cnt % 8 == 0) {
12948 			(void) sprintf(bp++, "\n");
12949 		}
12950 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12951 		bp += 9;
12952 	}
12953 
12954 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12955 	bp += strlen(bp);
12956 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12957 		if (cnt % 8 == 0) {
12958 			(void) sprintf(bp++, "\n");
12959 		}
12960 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12961 		bp += 9;
12962 	}
12963 
12964 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12965 	bp += strlen(bp);
12966 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12967 		if (cnt % 8 == 0) {
12968 			(void) sprintf(bp++, "\n");
12969 		}
12970 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12971 		bp += 9;
12972 	}
12973 
12974 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12975 	bp += strlen(bp);
12976 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12977 		if (cnt % 8 == 0) {
12978 			(void) sprintf(bp++, "\n");
12979 		}
12980 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12981 		bp += 9;
12982 	}
12983 
12984 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12985 	bp += strlen(bp);
12986 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12987 		if (cnt % 8 == 0) {
12988 			(void) sprintf(bp++, "\n");
12989 		}
12990 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12991 		bp += 9;
12992 	}
12993 
12994 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12995 	bp += strlen(bp);
12996 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12997 		if (cnt % 8 == 0) {
12998 			(void) sprintf(bp++, "\n");
12999 		}
13000 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
13001 		bp += 9;
13002 	}
13003 
13004 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
13005 	bp += strlen(bp);
13006 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
13007 		if (cnt % 8 == 0) {
13008 			(void) sprintf(bp++, "\n");
13009 		}
13010 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
13011 		bp += 9;
13012 	}
13013 
13014 	(void) sprintf(bp, "\n\nRISC GP Registers");
13015 	bp += strlen(bp);
13016 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13017 		if (cnt % 8 == 0) {
13018 			(void) sprintf(bp++, "\n");
13019 		}
13020 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13021 		bp += 9;
13022 	}
13023 
13024 	(void) sprintf(bp, "\n\nLMC Registers");
13025 	bp += strlen(bp);
13026 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13027 		if (cnt % 8 == 0) {
13028 			(void) sprintf(bp++, "\n");
13029 		}
13030 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13031 		bp += 9;
13032 	}
13033 
13034 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
13035 	bp += strlen(bp);
13036 	cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13037 	    (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
13038 	    (uint32_t)(sizeof (fw->fpm_hdw_reg));
13039 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13040 		if (cnt % 8 == 0) {
13041 			(void) sprintf(bp++, "\n");
13042 		}
13043 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13044 		bp += 9;
13045 	}
13046 
13047 	(void) sprintf(bp, "\n\nFB Hardware Registers");
13048 	bp += strlen(bp);
13049 	cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13050 	    (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
13051 	    (uint32_t)(sizeof (fw->fb_hdw_reg));
13052 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13053 		if (cnt % 8 == 0) {
13054 			(void) sprintf(bp++, "\n");
13055 		}
13056 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13057 		bp += 9;
13058 	}
13059 
13060 	(void) sprintf(bp, "\n\nCode RAM");
13061 	bp += strlen(bp);
13062 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13063 		if (cnt % 8 == 0) {
13064 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13065 			bp += 11;
13066 		}
13067 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13068 		bp += 9;
13069 	}
13070 
13071 	(void) sprintf(bp, "\n\nExternal Memory");
13072 	bp += strlen(bp);
13073 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13074 		if (cnt % 8 == 0) {
13075 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13076 			bp += 11;
13077 		}
13078 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13079 		bp += 9;
13080 	}
13081 
13082 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13083 	bp += strlen(bp);
13084 
13085 	(void) sprintf(bp, "\n\nRequest Queue");
13086 	bp += strlen(bp);
13087 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13088 		if (cnt % 8 == 0) {
13089 			(void) sprintf(bp, "\n%08x: ", cnt);
13090 			bp += strlen(bp);
13091 		}
13092 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13093 		bp += strlen(bp);
13094 	}
13095 
13096 	(void) sprintf(bp, "\n\nResponse Queue");
13097 	bp += strlen(bp);
13098 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13099 		if (cnt % 8 == 0) {
13100 			(void) sprintf(bp, "\n%08x: ", cnt);
13101 			bp += strlen(bp);
13102 		}
13103 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
13104 		bp += strlen(bp);
13105 	}
13106 
13107 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13108 	    (ha->fwexttracebuf.bp != NULL)) {
13109 		uint32_t cnt_b = 0;
13110 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13111 
13112 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13113 		bp += strlen(bp);
13114 		/* show data address as a byte address, data as long words */
13115 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13116 			cnt_b = cnt * 4;
13117 			if (cnt_b % 32 == 0) {
13118 				(void) sprintf(bp, "\n%08x: ",
13119 				    (int)(w64 + cnt_b));
13120 				bp += 11;
13121 			}
13122 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13123 			bp += 9;
13124 		}
13125 	}
13126 
13127 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13128 	    (ha->fwfcetracebuf.bp != NULL)) {
13129 		uint32_t cnt_b = 0;
13130 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13131 
13132 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13133 		bp += strlen(bp);
13134 		/* show data address as a byte address, data as long words */
13135 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13136 			cnt_b = cnt * 4;
13137 			if (cnt_b % 32 == 0) {
13138 				(void) sprintf(bp, "\n%08x: ",
13139 				    (int)(w64 + cnt_b));
13140 				bp += 11;
13141 			}
13142 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13143 			bp += 9;
13144 		}
13145 	}
13146 
13147 	(void) sprintf(bp, "\n\n");
13148 	bp += strlen(bp);
13149 
13150 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13151 
13152 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
13153 
13154 	return (cnt);
13155 }
13156 
13157 /*
13158  * ql_2200_binary_fw_dump
13159  *
13160  * Input:
13161  *	ha:	adapter state pointer.
13162  *	fw:	firmware dump context pointer.
13163  *
13164  * Returns:
13165  *	ql local function return status code.
13166  *
13167  * Context:
13168  *	Interrupt or Kernel context, no mailbox commands allowed.
13169  */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;	/* scratch mailbox context; only mb[0] is used */
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll for pause ack; ~30s total assuming MILLISEC is 1000us — TODO confirm */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Capture register banks.  Writes to ctrl_status and pcr
		 * select which bank appears in the I/O window, so the
		 * order of the selects and reads below must be preserved.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* RISC GP register banks 0-7, selected via pcr 0x2000-0x2700. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM one word at a time via the mailbox
		 * READ_RAM_WORD command: mailbox_in[0] holds the command
		 * (written once), mailbox_in[1] the address; raising the
		 * host interrupt starts each read and completion is
		 * signalled by semaphore BIT_0, with status in
		 * mailbox_out[0] and the data word in mailbox_out[2].
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			/* Poll for completion; ~30s max at 5us per spin. */
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (INTERRUPT_PENDING(ha)) {
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox_out[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					/* Not ours; clear and keep polling. */
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				/* Mailbox status is the per-word result. */
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13367 
13368 /*
13369  * ql_2300_binary_fw_dump
13370  *
13371  * Input:
13372  *	ha:	adapter state pointer.
13373  *	fw:	firmware dump context pointer.
13374  *
13375  * Returns:
13376  *	ql local function return status code.
13377  *
13378  * Context:
13379  *	Interrupt or Kernel context, no mailbox commands allowed.
13380  */
static int
ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	clock_t	timer;
	int	rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll for pause ack; ~30s total assuming MILLISEC is 1000us — TODO confirm */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Capture register banks.  Writes to ctrl_status and pcr
		 * select which bank appears in the I/O window, so the
		 * order of the selects and reads below must be preserved.
		 * Note the 2300 uses a pcr stride of 0x200 (vs 0x100 on
		 * the 2200) for the eight RISC GP banks.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
		    sizeof (fw->risc_host_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
		    sizeof (fw->mailbox_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x40);
		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
		    sizeof (fw->resp_dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x50);
		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2800);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2A00);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2C00);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2E00);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	/*
	 * SRAM capture is delegated to ql_read_risc_ram (unlike the 2200
	 * path, which hand-rolls the mailbox protocol); each region is
	 * read only if all previous steps succeeded.
	 */
	/* Get RISC SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
	}
	/* Get STACK SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
	}
	/* Get DATA SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13532 
13533 /*
13534  * ql_24xx_binary_fw_dump
13535  *
13536  * Input:
13537  *	ha:	adapter state pointer.
13538  *	fw:	firmware dump context pointer.
13539  *
13540  * Returns:
13541  *	ql local function return status code.
13542  *
13543  * Context:
13544  *	Interrupt or Kernel context, no mailbox commands allowed.
13545  */
13546 static int
13547 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
13548 {
13549 	uint32_t	*reg32;
13550 	void		*bp;
13551 	clock_t		timer;
13552 	int		rval = QL_SUCCESS;
13553 
13554 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13555 
13556 	fw->hccr = RD32_IO_REG(ha, hccr);
13557 
13558 	/* Pause RISC. */
13559 	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13560 		/* Disable ISP interrupts. */
13561 		WRT16_IO_REG(ha, ictrl, 0);
13562 
13563 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13564 		for (timer = 30000;
13565 		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13566 		    rval == QL_SUCCESS; timer--) {
13567 			if (timer) {
13568 				drv_usecwait(100);
13569 			} else {
13570 				rval = QL_FUNCTION_TIMEOUT;
13571 			}
13572 		}
13573 	}
13574 
13575 	if (rval == QL_SUCCESS) {
13576 		/* Host interface registers. */
13577 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13578 		    sizeof (fw->host_reg) / 4, 32);
13579 
13580 		/* Disable ISP interrupts. */
13581 		WRT32_IO_REG(ha, ictrl, 0);
13582 		RD32_IO_REG(ha, ictrl);
13583 		ADAPTER_STATE_LOCK(ha);
13584 		ha->flags &= ~INTERRUPTS_ENABLED;
13585 		ADAPTER_STATE_UNLOCK(ha);
13586 
13587 		/* Shadow registers. */
13588 
13589 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13590 		RD32_IO_REG(ha, io_base_addr);
13591 
13592 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13593 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
13594 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13595 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13596 
13597 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13598 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
13599 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13600 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13601 
13602 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13603 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
13604 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13605 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13606 
13607 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13608 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
13609 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13610 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13611 
13612 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13613 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
13614 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13615 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13616 
13617 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13618 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
13619 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13620 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13621 
13622 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13623 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
13624 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13625 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13626 
13627 		/* Mailbox registers. */
13628 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13629 		    sizeof (fw->mailbox_reg) / 2, 16);
13630 
13631 		/* Transfer sequence registers. */
13632 
13633 		/* XSEQ GP */
13634 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13635 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13636 		    16, 32);
13637 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13638 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13639 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13640 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13641 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13642 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13643 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13644 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13645 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13646 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13647 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13648 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13649 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13650 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13651 
13652 		/* XSEQ-0 */
13653 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13654 		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13655 		    sizeof (fw->xseq_0_reg) / 4, 32);
13656 
13657 		/* XSEQ-1 */
13658 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13659 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13660 		    sizeof (fw->xseq_1_reg) / 4, 32);
13661 
13662 		/* Receive sequence registers. */
13663 
13664 		/* RSEQ GP */
13665 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13666 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13667 		    16, 32);
13668 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13669 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13670 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13671 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13672 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13673 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13674 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13675 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13676 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13677 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13678 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13679 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13680 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13681 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13682 
13683 		/* RSEQ-0 */
13684 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13685 		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13686 		    sizeof (fw->rseq_0_reg) / 4, 32);
13687 
13688 		/* RSEQ-1 */
13689 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13690 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13691 		    sizeof (fw->rseq_1_reg) / 4, 32);
13692 
13693 		/* RSEQ-2 */
13694 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13695 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13696 		    sizeof (fw->rseq_2_reg) / 4, 32);
13697 
13698 		/* Command DMA registers. */
13699 
13700 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
13701 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13702 		    sizeof (fw->cmd_dma_reg) / 4, 32);
13703 
13704 		/* Queues. */
13705 
13706 		/* RequestQ0 */
13707 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
13708 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13709 		    8, 32);
13710 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13711 
13712 		/* ResponseQ0 */
13713 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
13714 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13715 		    8, 32);
13716 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13717 
13718 		/* RequestQ1 */
13719 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
13720 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13721 		    8, 32);
13722 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13723 
13724 		/* Transmit DMA registers. */
13725 
13726 		/* XMT0 */
13727 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
13728 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13729 		    16, 32);
13730 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
13731 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13732 
13733 		/* XMT1 */
13734 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
13735 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13736 		    16, 32);
13737 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
13738 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13739 
13740 		/* XMT2 */
13741 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
13742 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13743 		    16, 32);
13744 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
13745 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13746 
13747 		/* XMT3 */
13748 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
13749 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13750 		    16, 32);
13751 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
13752 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13753 
13754 		/* XMT4 */
13755 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
13756 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13757 		    16, 32);
13758 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
13759 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13760 
13761 		/* XMT Common */
13762 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13763 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13764 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13765 
13766 		/* Receive DMA registers. */
13767 
13768 		/* RCVThread0 */
13769 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
13770 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13771 		    ha->iobase + 0xC0, 16, 32);
13772 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
13773 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13774 
13775 		/* RCVThread1 */
13776 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
13777 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13778 		    ha->iobase + 0xC0, 16, 32);
13779 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
13780 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13781 
13782 		/* RISC registers. */
13783 
13784 		/* RISC GP */
13785 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13786 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13787 		    16, 32);
13788 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13789 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13790 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13791 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13792 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13793 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13794 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13795 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13796 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13797 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13798 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13799 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13800 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13801 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13802 
13803 		/* Local memory controller registers. */
13804 
13805 		/* LMC */
13806 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
13807 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13808 		    16, 32);
13809 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
13810 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13811 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
13812 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13813 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
13814 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13815 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
13816 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13817 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
13818 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13819 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
13820 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13821 
13822 		/* Fibre Protocol Module registers. */
13823 
13824 		/* FPM hardware */
13825 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
13826 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13827 		    16, 32);
13828 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
13829 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13830 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
13831 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13832 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
13833 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13834 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
13835 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13836 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
13837 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13838 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
13839 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13840 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
13841 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13842 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
13843 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13844 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
13845 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13846 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13847 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13848 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13849 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13850 
13851 		/* Frame Buffer registers. */
13852 
13853 		/* FB hardware */
13854 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
13855 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13856 		    16, 32);
13857 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
13858 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13859 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
13860 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13861 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
13862 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13863 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
13864 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13865 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
13866 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13867 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
13868 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13869 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
13870 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13871 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
13872 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13873 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
13874 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13875 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13876 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13877 	}
13878 
13879 	/* Get the request queue */
13880 	if (rval == QL_SUCCESS) {
13881 		uint32_t	cnt;
13882 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
13883 
13884 		/* Sync DMA buffer. */
13885 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
13886 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13887 		    DDI_DMA_SYNC_FORKERNEL);
13888 
13889 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13890 			fw->req_q[cnt] = *w32++;
13891 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13892 		}
13893 	}
13894 
13895 	/* Get the response queue */
13896 	if (rval == QL_SUCCESS) {
13897 		uint32_t	cnt;
13898 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
13899 
13900 		/* Sync DMA buffer. */
13901 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
13902 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13903 		    DDI_DMA_SYNC_FORKERNEL);
13904 
13905 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13906 			fw->rsp_q[cnt] = *w32++;
13907 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13908 		}
13909 	}
13910 
13911 	/* Reset RISC. */
13912 	ql_reset_chip(ha);
13913 
13914 	/* Memory. */
13915 	if (rval == QL_SUCCESS) {
13916 		/* Code RAM. */
13917 		rval = ql_read_risc_ram(ha, 0x20000,
13918 		    sizeof (fw->code_ram) / 4, fw->code_ram);
13919 	}
13920 	if (rval == QL_SUCCESS) {
13921 		/* External Memory. */
13922 		rval = ql_read_risc_ram(ha, 0x100000,
13923 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
13924 	}
13925 
13926 	/* Get the extended trace buffer */
13927 	if (rval == QL_SUCCESS) {
13928 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13929 		    (ha->fwexttracebuf.bp != NULL)) {
13930 			uint32_t	cnt;
13931 			uint32_t	*w32 = ha->fwexttracebuf.bp;
13932 
13933 			/* Sync DMA buffer. */
13934 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13935 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13936 
13937 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13938 				fw->ext_trace_buf[cnt] = *w32++;
13939 			}
13940 		}
13941 	}
13942 
13943 	/* Get the FC event trace buffer */
13944 	if (rval == QL_SUCCESS) {
13945 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13946 		    (ha->fwfcetracebuf.bp != NULL)) {
13947 			uint32_t	cnt;
13948 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
13949 
13950 			/* Sync DMA buffer. */
13951 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13952 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13953 
13954 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13955 				fw->fce_trace_buf[cnt] = *w32++;
13956 			}
13957 		}
13958 	}
13959 
13960 	if (rval != QL_SUCCESS) {
13961 		EL(ha, "failed=%xh\n", rval);
13962 	} else {
13963 		/*EMPTY*/
13964 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13965 	}
13966 
13967 	return (rval);
13968 }
13969 
13970 /*
13971  * ql_25xx_binary_fw_dump
13972  *
13973  * Input:
13974  *	ha:	adapter state pointer.
13975  *	fw:	firmware dump context pointer.
13976  *
13977  * Returns:
13978  *	ql local function return status code.
13979  *
13980  * Context:
13981  *	Interrupt or Kernel context, no mailbox commands allowed.
13982  */
static int
ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Snapshot the RISC-to-host status before touching the chip. */
	fw->r2h_status = RD32_IO_REG(ha, risc2host);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll up to 30000 * 100us (~3s) for the RISC to pause. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {

		/* Host Interface registers */

		/*
		 * Register banks are dumped by selecting a window via
		 * io_base_addr and reading 32-bit words at iobase + 0xC0.
		 * bp advances through the dump buffer as each window of
		 * registers is copied out by ql_read_regs().
		 */

		/* HostRisc registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7000);
		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* PCIe registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
		    3, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);

		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */

		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/* Shadow registers. */

		/* Select a shadow register via 0xF0, read its value at 0xFC. */
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0700000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0800000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0900000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);

		/* RISC I/O register. */

		WRT32_IO_REG(ha, io_base_addr, 0x0010);
		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
		    1, 32);

		/* Mailbox registers. */

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Auxiliary sequencer registers. */

		/* ASEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xB000);
		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* ASEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller (LMC) registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy out, converting each word to little-endian. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy out, converting each word to little-endian. */
		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */

	ql_reset_chip(ha);

	/* Memory. */

	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
14506 
14507 /*
14508  * ql_81xx_binary_fw_dump
14509  *
14510  * Input:
14511  *	ha:	adapter state pointer.
14512  *	fw:	firmware dump context pointer.
14513  *
14514  * Returns:
14515  *	ql local function return status code.
14516  *
14517  * Context:
14518  *	Interrupt or Kernel context, no mailbox commands allowed.
14519  */
static int
ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
{
	uint32_t	*reg32;	/* shadow-register access pointer */
	void		*bp;	/* running dump-buffer output pointer */
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Latch the RISC-to-host status before disturbing the chip. */
	fw->r2h_status = RD32_IO_REG(ha, risc2host);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Wait up to 3 seconds (30000 * 100us) for the pause. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {

		/* Host Interface registers */

		/*
		 * Each register bank below is selected by writing its base
		 * address to io_base_addr and then reading the windowed
		 * registers; ql_read_regs() returns a pointer just past the
		 * last word written so successive banks append to the same
		 * dump area.
		 */

		/* HostRisc registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7000);
		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* PCIe registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
		    3, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);

		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */

		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/* Shadow registers. */

		/* Select shadow value via offset 0xF0, read it at 0xFC. */
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0700000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0800000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0900000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);

		/* RISC I/O register. */

		WRT32_IO_REG(ha, io_base_addr, 0x0010);
		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
		    1, 32);

		/* Mailbox registers. */

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Auxiliary sequencer registers. */

		/* ASEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xB000);
		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* ASEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller (LMC) registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40C0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61C0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy, converting each word to little endian for the dump. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy, converting each word to little endian for the dump. */
		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */

	/* Reset unconditionally; RAM reads below require a quiesced RISC. */
	ql_reset_chip(ha);

	/* Memory. */

	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
15049 
15050 /*
15051  * ql_read_risc_ram
15052  *	Reads RISC RAM one word at a time.
15053  *	Risc interrupts must be disabled when this routine is called.
15054  *
15055  * Input:
15056  *	ha:	adapter state pointer.
15057  *	risc_address:	RISC code start address.
15058  *	len:		Number of words.
15059  *	buf:		buffer pointer.
15060  *
15061  * Returns:
15062  *	ql local function return status code.
15063  *
15064  * Context:
15065  *	Interrupt or Kernel context, no mailbox commands allowed.
15066  */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;
	uint32_t	*buf32 = (uint32_t *)buf;
	int		rval = QL_SUCCESS;

	/* One mailbox command per word; interrupts are polled by hand. */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
		/* Mailbox 8 carries the high word of the extended address. */
		WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
		/* Ring the doorbell appropriate to the chip family. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		}
		/* Poll up to 30 seconds (6000000 * 5us) for completion. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (INTERRUPT_PENDING(ha)) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, risc2host) & 0xff);
				/* 1/0x10: mailbox command complete. */
				if ((stat == 1) || (stat == 0x10)) {
					if (CFG_IST(ha, CFG_CTRL_24258081)) {
						/* 24xx+ returns 32 bits. */
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox_out[2]),
						    RD16_IO_REG(ha,
						    mailbox_out[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
					}

					break;
				/* 2/0x11: mailbox command error. */
				} else if ((stat == 2) || (stat == 0x11)) {
					rval = RD16_IO_REG(ha, mailbox_out[0]);
					break;
				}
				/* Other interrupt: clear it and keep polling. */
				if (CFG_IST(ha, CFG_CTRL_8021)) {
					ql_8021_clr_hw_intr(ha);
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Acknowledge the completion (or stale) interrupt. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_hw_intr(ha);
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
15143 
15144 /*
15145  * ql_read_regs
15146  *	Reads adapter registers to buffer.
15147  *
15148  * Input:
15149  *	ha:	adapter state pointer.
15150  *	buf:	buffer pointer.
15151  *	reg:	start address.
15152  *	count:	number of registers.
15153  *	wds:	register size.
15154  *
15155  * Context:
15156  *	Interrupt or Kernel context, no mailbox commands allowed.
15157  */
15158 static void *
15159 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
15160     uint8_t wds)
15161 {
15162 	uint32_t	*bp32, *reg32;
15163 	uint16_t	*bp16, *reg16;
15164 	uint8_t		*bp8, *reg8;
15165 
15166 	switch (wds) {
15167 	case 32:
15168 		bp32 = buf;
15169 		reg32 = reg;
15170 		while (count--) {
15171 			*bp32++ = RD_REG_DWORD(ha, reg32++);
15172 		}
15173 		return (bp32);
15174 	case 16:
15175 		bp16 = buf;
15176 		reg16 = reg;
15177 		while (count--) {
15178 			*bp16++ = RD_REG_WORD(ha, reg16++);
15179 		}
15180 		return (bp16);
15181 	case 8:
15182 		bp8 = buf;
15183 		reg8 = reg;
15184 		while (count--) {
15185 			*bp8++ = RD_REG_BYTE(ha, reg8++);
15186 		}
15187 		return (bp8);
15188 	default:
15189 		EL(ha, "Unknown word size=%d\n", wds);
15190 		return (buf);
15191 	}
15192 }
15193 
15194 static int
15195 ql_save_config_regs(dev_info_t *dip)
15196 {
15197 	ql_adapter_state_t	*ha;
15198 	int			ret;
15199 	ql_config_space_t	chs;
15200 	caddr_t			prop = "ql-config-space";
15201 
15202 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15203 	if (ha == NULL) {
15204 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15205 		    ddi_get_instance(dip));
15206 		return (DDI_FAILURE);
15207 	}
15208 
15209 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15210 
15211 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15212 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
15213 	    1) {
15214 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15215 		return (DDI_SUCCESS);
15216 	}
15217 
15218 	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
15219 	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
15220 	    PCI_CONF_HEADER);
15221 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15222 		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
15223 		    PCI_BCNF_BCNTRL);
15224 	}
15225 
15226 	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
15227 	    PCI_CONF_CACHE_LINESZ);
15228 
15229 	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15230 	    PCI_CONF_LATENCY_TIMER);
15231 
15232 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15233 		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15234 		    PCI_BCNF_LATENCY_TIMER);
15235 	}
15236 
15237 	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
15238 	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
15239 	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
15240 	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
15241 	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
15242 	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
15243 
15244 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15245 	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
15246 	    (uchar_t *)&chs, sizeof (ql_config_space_t));
15247 
15248 	if (ret != DDI_PROP_SUCCESS) {
15249 		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
15250 		    QL_NAME, ddi_get_instance(dip), prop);
15251 		return (DDI_FAILURE);
15252 	}
15253 
15254 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15255 
15256 	return (DDI_SUCCESS);
15257 }
15258 
15259 static int
15260 ql_restore_config_regs(dev_info_t *dip)
15261 {
15262 	ql_adapter_state_t	*ha;
15263 	uint_t			elements;
15264 	ql_config_space_t	*chs_p;
15265 	caddr_t			prop = "ql-config-space";
15266 
15267 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15268 	if (ha == NULL) {
15269 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15270 		    ddi_get_instance(dip));
15271 		return (DDI_FAILURE);
15272 	}
15273 
15274 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15275 
15276 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15277 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
15278 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
15279 	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
15280 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15281 		return (DDI_FAILURE);
15282 	}
15283 
15284 	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
15285 
15286 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15287 		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
15288 		    chs_p->chs_bridge_control);
15289 	}
15290 
15291 	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
15292 	    chs_p->chs_cache_line_size);
15293 
15294 	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
15295 	    chs_p->chs_latency_timer);
15296 
15297 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15298 		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
15299 		    chs_p->chs_sec_latency_timer);
15300 	}
15301 
15302 	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
15303 	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
15304 	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
15305 	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
15306 	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
15307 	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
15308 
15309 	ddi_prop_free(chs_p);
15310 
15311 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15312 	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
15313 		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
15314 		    QL_NAME, ddi_get_instance(dip), prop);
15315 	}
15316 
15317 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15318 
15319 	return (DDI_SUCCESS);
15320 }
15321 
15322 uint8_t
15323 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15324 {
15325 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15326 		return (ddi_get8(ha->sbus_config_handle,
15327 		    (uint8_t *)(ha->sbus_config_base + off)));
15328 	}
15329 
15330 #ifdef KERNEL_32
15331 	return (pci_config_getb(ha->pci_handle, off));
15332 #else
15333 	return (pci_config_get8(ha->pci_handle, off));
15334 #endif
15335 }
15336 
15337 uint16_t
15338 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15339 {
15340 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15341 		return (ddi_get16(ha->sbus_config_handle,
15342 		    (uint16_t *)(ha->sbus_config_base + off)));
15343 	}
15344 
15345 #ifdef KERNEL_32
15346 	return (pci_config_getw(ha->pci_handle, off));
15347 #else
15348 	return (pci_config_get16(ha->pci_handle, off));
15349 #endif
15350 }
15351 
15352 uint32_t
15353 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15354 {
15355 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15356 		return (ddi_get32(ha->sbus_config_handle,
15357 		    (uint32_t *)(ha->sbus_config_base + off)));
15358 	}
15359 
15360 #ifdef KERNEL_32
15361 	return (pci_config_getl(ha->pci_handle, off));
15362 #else
15363 	return (pci_config_get32(ha->pci_handle, off));
15364 #endif
15365 }
15366 
15367 void
15368 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15369 {
15370 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15371 		ddi_put8(ha->sbus_config_handle,
15372 		    (uint8_t *)(ha->sbus_config_base + off), val);
15373 	} else {
15374 #ifdef KERNEL_32
15375 		pci_config_putb(ha->pci_handle, off, val);
15376 #else
15377 		pci_config_put8(ha->pci_handle, off, val);
15378 #endif
15379 	}
15380 }
15381 
15382 void
15383 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15384 {
15385 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15386 		ddi_put16(ha->sbus_config_handle,
15387 		    (uint16_t *)(ha->sbus_config_base + off), val);
15388 	} else {
15389 #ifdef KERNEL_32
15390 		pci_config_putw(ha->pci_handle, off, val);
15391 #else
15392 		pci_config_put16(ha->pci_handle, off, val);
15393 #endif
15394 	}
15395 }
15396 
15397 void
15398 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15399 {
15400 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15401 		ddi_put32(ha->sbus_config_handle,
15402 		    (uint32_t *)(ha->sbus_config_base + off), val);
15403 	} else {
15404 #ifdef KERNEL_32
15405 		pci_config_putl(ha->pci_handle, off, val);
15406 #else
15407 		pci_config_put32(ha->pci_handle, off, val);
15408 #endif
15409 	}
15410 }
15411 
15412 /*
15413  * ql_halt
15414  *	Waits for commands that are running to finish and
15415  *	if they do not, commands are aborted.
15416  *	Finally the adapter is reset.
15417  *
15418  * Input:
15419  *	ha:	adapter state pointer.
15420  *	pwr:	power state.
15421  *
15422  * Context:
15423  *	Kernel context.
15424  */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/* Wait for 30 seconds for commands to finish. */
			/* 3000 iterations * 10ms delay = 30 seconds. */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			/* (a command parked in status_srb mid-completion) */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/* Abort commands that did not finish. */
			/* cnt == 0 here means the 30 second wait timed out. */
			if (cnt == 0) {
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					if (ha->pending_cmds.first != NULL) {
						/*
						 * New commands were queued;
						 * start them and rescan the
						 * outstanding array from the
						 * beginning.
						 */
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					/* Abort only this target's commands. */
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Going to full power-off: mark offline and reset the chip. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15509 
15510 /*
15511  * ql_get_dma_mem
15512  *	Function used to allocate dma memory.
15513  *
15514  * Input:
15515  *	ha:			adapter state pointer.
15516  *	mem:			pointer to dma memory object.
15517  *	size:			size of the request in bytes
15518  *
15519  * Returns:
 *	ql local function return status code.
15521  *
15522  * Context:
15523  *	Kernel context.
15524  */
15525 int
15526 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15527     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15528 {
15529 	int	rval;
15530 
15531 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15532 
15533 	mem->size = size;
15534 	mem->type = allocation_type;
15535 	mem->cookie_count = 1;
15536 
15537 	switch (alignment) {
15538 	case QL_DMA_DATA_ALIGN:
15539 		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15540 		break;
15541 	case QL_DMA_RING_ALIGN:
15542 		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15543 		break;
15544 	default:
15545 		EL(ha, "failed, unknown alignment type %x\n", alignment);
15546 		break;
15547 	}
15548 
15549 	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15550 		ql_free_phys(ha, mem);
15551 		EL(ha, "failed, alloc_phys=%xh\n", rval);
15552 	}
15553 
15554 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15555 
15556 	return (rval);
15557 }
15558 
15559 /*
15560  * ql_alloc_phys
15561  *	Function used to allocate memory and zero it.
15562  *	Memory is below 4 GB.
15563  *
15564  * Input:
15565  *	ha:			adapter state pointer.
15566  *	mem:			pointer to dma memory object.
15567  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
15568  *	mem->cookie_count	number of segments allowed.
15569  *	mem->type		memory allocation type.
15570  *	mem->size		memory size.
15571  *	mem->alignment		memory alignment.
15572  *
15573  * Returns:
 *	ql local function return status code.
15575  *
15576  * Context:
15577  *	Kernel context.
15578  */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	ddi_dma_attr_t		dma_attr;
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Select 64-bit or 32-bit DMA attributes per adapter config. */
	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	switch (mem->type) {
	case KERNEL_MEM:
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/* Pick device access endianness before the DDI alloc. */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			/*
			 * NOTE(review): both comparisons below test integer
			 * fields against NULL (i.e. 0); they work but 0
			 * would be the conventional constant.
			 */
			if (dma_attr.dma_attr_addr_hi == NULL) {
				if (mem->cookie.dmac_notused != NULL) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	/* A NULL bp covers both alloc failure and the unknown-type case. */
	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15674 
15675 /*
15676  * ql_free_phys
15677  *	Function used to free physical memory.
15678  *
15679  * Input:
15680  *	ha:	adapter state pointer.
15681  *	mem:	pointer to dma memory object.
15682  *
15683  * Context:
15684  *	Kernel context.
15685  */
15686 void
15687 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15688 {
15689 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15690 
15691 	if (mem != NULL && mem->dma_handle != NULL) {
15692 		ql_unbind_dma_buffer(ha, mem);
15693 		switch (mem->type) {
15694 		case KERNEL_MEM:
15695 			if (mem->bp != NULL) {
15696 				kmem_free(mem->bp, mem->size);
15697 			}
15698 			break;
15699 		case LITTLE_ENDIAN_DMA:
15700 		case BIG_ENDIAN_DMA:
15701 		case NO_SWAP_DMA:
15702 			if (mem->acc_handle != NULL) {
15703 				ddi_dma_mem_free(&mem->acc_handle);
15704 				mem->acc_handle = NULL;
15705 			}
15706 			break;
15707 		default:
15708 			break;
15709 		}
15710 		mem->bp = NULL;
15711 		ddi_dma_free_handle(&mem->dma_handle);
15712 		mem->dma_handle = NULL;
15713 	}
15714 
15715 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15716 }
15717 
15718 /*
15719  * ql_alloc_dma_resouce.
15720  *	Allocates DMA resource for buffer.
15721  *
15722  * Input:
15723  *	ha:			adapter state pointer.
15724  *	mem:			pointer to dma memory object.
15725  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
15726  *	mem->cookie_count	number of segments allowed.
15727  *	mem->type		memory allocation type.
15728  *	mem->size		memory size.
15729  *	mem->bp			pointer to memory or struct buf
15730  *
15731  * Returns:
 *	ql local function return status code.
15733  *
15734  * Context:
15735  *	Kernel context.
15736  */
15737 int
15738 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15739 {
15740 	ddi_dma_attr_t	dma_attr;
15741 
15742 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15743 
15744 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15745 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15746 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15747 
15748 	/*
15749 	 * Allocate DMA handle for command.
15750 	 */
15751 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15752 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15753 	    DDI_SUCCESS) {
15754 		EL(ha, "failed, ddi_dma_alloc_handle\n");
15755 		mem->dma_handle = NULL;
15756 		return (QL_MEMORY_ALLOC_FAILED);
15757 	}
15758 
15759 	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15760 
15761 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15762 		EL(ha, "failed, bind_dma_buffer\n");
15763 		ddi_dma_free_handle(&mem->dma_handle);
15764 		mem->dma_handle = NULL;
15765 		return (QL_MEMORY_ALLOC_FAILED);
15766 	}
15767 
15768 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15769 
15770 	return (QL_SUCCESS);
15771 }
15772 
15773 /*
15774  * ql_free_dma_resource
15775  *	Frees DMA resources.
15776  *
15777  * Input:
15778  *	ha:		adapter state pointer.
15779  *	mem:		pointer to dma memory object.
15780  *	mem->dma_handle	DMA memory handle.
15781  *
15782  * Context:
15783  *	Kernel context.
15784  */
15785 void
15786 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15787 {
15788 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15789 
15790 	ql_free_phys(ha, mem);
15791 
15792 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15793 }
15794 
15795 /*
15796  * ql_bind_dma_buffer
15797  *	Binds DMA buffer.
15798  *
15799  * Input:
15800  *	ha:			adapter state pointer.
15801  *	mem:			pointer to dma memory object.
15802  *	sleep:			KM_SLEEP or KM_NOSLEEP.
15803  *	mem->dma_handle		DMA memory handle.
15804  *	mem->cookie_count	number of segments allowed.
15805  *	mem->type		memory allocation type.
15806  *	mem->size		memory size.
15807  *	mem->bp			pointer to memory or struct buf
15808  *
15809  * Returns:
15810  *	mem->cookies		pointer to list of cookies.
15811  *	mem->cookie_count	number of cookies.
15812  *	status			success = DDI_DMA_MAPPED
15813  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15814  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15815  *				DDI_DMA_TOOBIG
15816  *
15817  * Context:
15818  *	Kernel context.
15819  */
static int
ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	int			rval;
	ddi_dma_cookie_t	*cookiep;
	uint32_t		cnt = mem->cookie_count; /* caller's limit */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* struct buf memory binds via the buf(9S); everything else by addr. */
	if (mem->type == STRUCT_BUF_MEMORY) {
		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
	} else {
		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
		    &mem->cookie_count);
	}

	if (rval == DDI_DMA_MAPPED) {
		/* Bind produced more segments than the caller allowed. */
		if (mem->cookie_count > cnt) {
			(void) ddi_dma_unbind_handle(mem->dma_handle);
			EL(ha, "failed, cookie_count %d > %d\n",
			    mem->cookie_count, cnt);
			rval = DDI_DMA_TOOBIG;
		} else {
			if (mem->cookie_count > 1) {
				/* Intentional assignment in the condition. */
				if (mem->cookies = kmem_zalloc(
				    sizeof (ddi_dma_cookie_t) *
				    mem->cookie_count, sleep)) {
					/* First cookie came from the bind. */
					*mem->cookies = mem->cookie;
					cookiep = mem->cookies;
					/* Walk out the remaining cookies. */
					for (cnt = 1; cnt < mem->cookie_count;
					    cnt++) {
						ddi_dma_nextcookie(
						    mem->dma_handle,
						    ++cookiep);
					}
				} else {
					(void) ddi_dma_unbind_handle(
					    mem->dma_handle);
					EL(ha, "failed, kmem_zalloc\n");
					rval = DDI_DMA_NORESOURCES;
				}
			} else {
				/*
				 * It has been reported that dmac_size at times
				 * may be incorrect on sparc machines so for
				 * sparc machines that only have one segment
				 * use the buffer size instead.
				 */
				mem->cookies = &mem->cookie;
				mem->cookies->dmac_size = mem->size;
			}
		}
	}

	if (rval != DDI_DMA_MAPPED) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
15887 
15888 /*
15889  * ql_unbind_dma_buffer
15890  *	Unbinds DMA buffer.
15891  *
15892  * Input:
15893  *	ha:			adapter state pointer.
15894  *	mem:			pointer to dma memory object.
15895  *	mem->dma_handle		DMA memory handle.
15896  *	mem->cookies		pointer to cookie list.
15897  *	mem->cookie_count	number of cookies.
15898  *
15899  * Context:
15900  *	Kernel context.
15901  */
15902 /* ARGSUSED */
15903 static void
15904 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15905 {
15906 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15907 
15908 	(void) ddi_dma_unbind_handle(mem->dma_handle);
15909 	if (mem->cookie_count > 1) {
15910 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15911 		    mem->cookie_count);
15912 		mem->cookies = NULL;
15913 	}
15914 	mem->cookie_count = 0;
15915 
15916 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15917 }
15918 
/*
 * ql_suspend_adapter
 *	Quiesces an adapter instance: claims mailbox ownership, waits
 *	for outstanding commands, flushes outstanding and internally
 *	queued commands via ql_halt(), then disables ISP interrupts
 *	(unless the adapter is already powered down to D3).
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	QL_SUCCESS or QL_FUNCTION_TIMEOUT.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	clock_t timer = 32 * drv_usectohz(1000000);	/* 32 sec in ticks */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* Wait up to "timer" ticks (32 seconds) for the mailbox. */
		if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer, TR_CLOCK_TICK) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15988 
15989 /*
15990  * ql_add_link_b
15991  *	Add link to the end of the chain.
15992  *
15993  * Input:
15994  *	head = Head of link list.
15995  *	link = link to be added.
15996  *	LOCK must be already obtained.
15997  *
15998  * Context:
15999  *	Interrupt or Kernel context, no mailbox commands allowed.
16000  */
16001 void
16002 ql_add_link_b(ql_head_t *head, ql_link_t *link)
16003 {
16004 	/* at the end there isn't a next */
16005 	link->next = NULL;
16006 
16007 	if ((link->prev = head->last) == NULL) {
16008 		head->first = link;
16009 	} else {
16010 		head->last->next = link;
16011 	}
16012 
16013 	head->last = link;
16014 	link->head = head;	/* the queue we're on */
16015 }
16016 
16017 /*
16018  * ql_add_link_t
16019  *	Add link to the beginning of the chain.
16020  *
16021  * Input:
16022  *	head = Head of link list.
16023  *	link = link to be added.
16024  *	LOCK must be already obtained.
16025  *
16026  * Context:
16027  *	Interrupt or Kernel context, no mailbox commands allowed.
16028  */
16029 void
16030 ql_add_link_t(ql_head_t *head, ql_link_t *link)
16031 {
16032 	link->prev = NULL;
16033 
16034 	if ((link->next = head->first) == NULL)	{
16035 		head->last = link;
16036 	} else {
16037 		head->first->prev = link;
16038 	}
16039 
16040 	head->first = link;
16041 	link->head = head;	/* the queue we're on */
16042 }
16043 
16044 /*
16045  * ql_remove_link
16046  *	Remove a link from the chain.
16047  *
16048  * Input:
16049  *	head = Head of link list.
16050  *	link = link to be removed.
16051  *	LOCK must be already obtained.
16052  *
16053  * Context:
16054  *	Interrupt or Kernel context, no mailbox commands allowed.
16055  */
16056 void
16057 ql_remove_link(ql_head_t *head, ql_link_t *link)
16058 {
16059 	if (link->prev != NULL) {
16060 		if ((link->prev->next = link->next) == NULL) {
16061 			head->last = link->prev;
16062 		} else {
16063 			link->next->prev = link->prev;
16064 		}
16065 	} else if ((head->first = link->next) == NULL) {
16066 		head->last = NULL;
16067 	} else {
16068 		head->first->prev = NULL;
16069 	}
16070 
16071 	/* not on a queue any more */
16072 	link->prev = link->next = NULL;
16073 	link->head = NULL;
16074 }
16075 
16076 /*
16077  * ql_chg_endian
16078  *	Change endianess of byte array.
16079  *
16080  * Input:
16081  *	buf = array pointer.
16082  *	size = size of array in bytes.
16083  *
16084  * Context:
16085  *	Interrupt or Kernel context, no mailbox commands allowed.
16086  */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo, hi;
	uint8_t	tmp;

	/* Arrays of fewer than two bytes are already "reversed". */
	if (size < 2) {
		return;
	}

	/* Swap symmetric pairs walking inward from both ends. */
	for (lo = 0, hi = size - 1; lo < hi; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
16102 
16103 /*
16104  * ql_bstr_to_dec
16105  *	Convert decimal byte string to number.
16106  *
16107  * Input:
16108  *	s:	byte string pointer.
 *	ans:	integer pointer for number.
16110  *	size:	number of ascii bytes.
16111  *
16112  * Returns:
16113  *	success = number of ascii bytes processed.
16114  *
16115  * Context:
16116  *	Kernel/Interrupt context.
16117  */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	int	cnt = 0;
	int	digit, weight, pos;
	char	*p;

	/* A size of zero means: count the leading decimal digits. */
	if (size == 0) {
		for (p = s; *p >= '0' && *p <= '9'; p++) {
			size++;
		}
	}

	*ans = 0;
	while (*s != '\0' && size) {
		if (*s < '0' || *s > '9') {
			/* Stop at the first non-digit character. */
			break;
		}
		digit = *s++ - '0';

		/* Weight of the current position is 10^(size - 1). */
		weight = 1;
		for (pos = 1; pos < size; pos++) {
			weight *= 10;
		}
		*ans += digit * weight;

		size--;
		cnt++;
	}

	return (cnt);
}
16147 
16148 /*
16149  * ql_delay
16150  *	Calls delay routine if threads are not suspended, otherwise, busy waits
16151  *	Minimum = 1 tick = 10ms
16152  *
16153  * Input:
16154  *	dly = delay time in microseconds.
16155  *
16156  * Context:
16157  *	Kernel or Interrupt context, no mailbox commands allowed.
16158  */
16159 void
16160 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
16161 {
16162 	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
16163 		drv_usecwait(usecs);
16164 	} else {
16165 		delay(drv_usectohz(usecs));
16166 	}
16167 }
16168 
16169 /*
16170  * ql_stall_drv
16171  *	Stalls one or all driver instances, waits for 30 seconds.
16172  *
16173  * Input:
16174  *	ha:		adapter state pointer or NULL for all.
16175  *	options:	BIT_0 --> leave driver stalled on exit if
16176  *				  failed.
16177  *
16178  * Returns:
16179  *	ql local function return status code.
16180  *
16181  * Context:
16182  *	Kernel context.
16183  */
16184 int
16185 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
16186 {
16187 	ql_link_t		*link;
16188 	ql_adapter_state_t	*ha2;
16189 	uint32_t		timer;
16190 
16191 	QL_PRINT_3(CE_CONT, "started\n");
16192 
16193 	/* Wait for 30 seconds for daemons unstall. */
16194 	timer = 3000;
16195 	link = ha == NULL ? ql_hba.first : &ha->hba;
16196 	while (link != NULL && timer) {
16197 		ha2 = link->base_address;
16198 
16199 		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
16200 
16201 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16202 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16203 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
16204 		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
16205 			link = ha == NULL ? link->next : NULL;
16206 			continue;
16207 		}
16208 
16209 		ql_delay(ha2, 10000);
16210 		timer--;
16211 		link = ha == NULL ? ql_hba.first : &ha->hba;
16212 	}
16213 
16214 	if (ha2 != NULL && timer == 0) {
16215 		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
16216 		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
16217 		    "unstalled"));
16218 		if (options & BIT_0) {
16219 			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16220 		}
16221 		return (QL_FUNCTION_TIMEOUT);
16222 	}
16223 
16224 	QL_PRINT_3(CE_CONT, "done\n");
16225 
16226 	return (QL_SUCCESS);
16227 }
16228 
16229 /*
16230  * ql_restart_driver
16231  *	Restarts one or all driver instances.
16232  *
16233  * Input:
16234  *	ha:	adapter state pointer or NULL for all.
16235  *
16236  * Context:
16237  *	Kernel context.
16238  */
void
ql_restart_driver(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2;
	uint32_t		timer;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Tell all daemons to unstall. */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		/* Clear the stall flag on this instance's task daemon. */
		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);

		/* With a specific adapter, only that one is visited. */
		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for all daemons unstall. */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/* Daemon gone, stopping, or no longer stalled: restarted. */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
			    ha2->instance, ha2->vp_index);
			ql_restart_queues(ha2);
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);

		/* Still stalled; delay 10ms and rescan from the list head. */
		ql_delay(ha2, 10000);
		timer--;
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
16284 
16285 /*
16286  * ql_setup_interrupts
16287  *	Sets up interrupts based on the HBA's and platform's
16288  *	capabilities (e.g., legacy / MSI / FIXED).
16289  *
16290  * Input:
16291  *	ha = adapter state pointer.
16292  *
16293  * Returns:
16294  *	DDI_SUCCESS or DDI_FAILURE.
16295  *
16296  * Context:
16297  *	Kernel context.
16298  */
16299 static int
16300 ql_setup_interrupts(ql_adapter_state_t *ha)
16301 {
16302 	int32_t		rval = DDI_FAILURE;
16303 	int32_t		i;
16304 	int32_t		itypes = 0;
16305 
16306 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16307 
16308 	/*
16309 	 * The Solaris Advanced Interrupt Functions (aif) are only
16310 	 * supported on s10U1 or greater.
16311 	 */
16312 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16313 		EL(ha, "interrupt framework is not supported or is "
16314 		    "disabled, using legacy\n");
16315 		return (ql_legacy_intr(ha));
16316 	} else if (ql_os_release_level == 10) {
16317 		/*
16318 		 * See if the advanced interrupt functions (aif) are
16319 		 * in the kernel
16320 		 */
16321 		void	*fptr = (void *)&ddi_intr_get_supported_types;
16322 
16323 		if (fptr == NULL) {
16324 			EL(ha, "aif is not supported, using legacy "
16325 			    "interrupts (rev)\n");
16326 			return (ql_legacy_intr(ha));
16327 		}
16328 	}
16329 
16330 	/* See what types of interrupts this HBA and platform support */
16331 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16332 	    DDI_SUCCESS) {
16333 		EL(ha, "get supported types failed, rval=%xh, "
16334 		    "assuming FIXED\n", i);
16335 		itypes = DDI_INTR_TYPE_FIXED;
16336 	}
16337 
16338 	EL(ha, "supported types are: %xh\n", itypes);
16339 
16340 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
16341 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16342 		EL(ha, "successful MSI-X setup\n");
16343 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
16344 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16345 		EL(ha, "successful MSI setup\n");
16346 	} else {
16347 		rval = ql_setup_fixed(ha);
16348 	}
16349 
16350 	if (rval != DDI_SUCCESS) {
16351 		EL(ha, "failed, aif, rval=%xh\n", rval);
16352 	} else {
16353 		/*EMPTY*/
16354 		QL_PRINT_3(CE_CONT, "(%d): done\n");
16355 	}
16356 
16357 	return (rval);
16358 }
16359 
16360 /*
16361  * ql_setup_msi
16362  *	Set up aif MSI interrupts
16363  *
16364  * Input:
16365  *	ha = adapter state pointer.
16366  *
16367  * Returns:
16368  *	DDI_SUCCESS or DDI_FAILURE.
16369  *
16370  * Context:
16371  *	Kernel context.
16372  */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_24258081))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1.  */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	/* KM_SLEEP allocation; cannot fail. */
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	/* Block-capable interrupts must be enabled as a group. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16479 
16480 /*
16481  * ql_setup_msix
16482  *	Set up aif MSI-X interrupts
16483  *
16484  * Input:
16485  *	ha = adapter state pointer.
16486  *
16487  * Returns:
16488  *	DDI_SUCCESS or DDI_FAILURE.
16489  *
16490  * Context:
16491  *	Kernel context.
16492  */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Global tunable lets the administrator force MSI-X off. */
	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001) ||
	    (ha->device_id == 0x8021))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	/* 2422 parts below rev A2 cannot do MSI-X. */
	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/*
	 * Get the number of 24xx/25xx MSI-X h/w vectors from the PCI
	 * config space MSI-X table size field (10-bit value, N-1 encoded;
	 * the field lives at a chip-specific config offset).
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	/* Need at least one vector per aif handler in itrfun[]. */
	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table: one handler per aif vector. */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/*
	 * Allocate space for interrupt handles; sized by hwvect (not
	 * count) so sparc can dup into the extra slots below.
	 * NOTE(review): KM_SLEEP allocations do not return NULL, so the
	 * failure branch below appears to be defensive dead code.
	 */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	/* Set before any failure path so ql_release_intr() will clean up. */
	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; vector index passed as arg2. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes (must follow intr_pri retrieval above). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts: block-enable when supported, else per vector. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16659 
16660 /*
16661  * ql_setup_fixed
16662  *	Sets up aif FIXED interrupts
16663  *
16664  * Input:
16665  *	ha = adapter state pointer.
16666  *
16667  * Returns:
16668  *	DDI_SUCCESS or DDI_FAILURE.
16669  *
16670  * Context:
16671  *	Kernel context.
16672  */
static int
ql_setup_fixed(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		actual = 0;
	int32_t		ret;
	uint32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get number of fixed interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
	    &count)) != DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Set before any failure path so ql_release_intr() will clean up. */
	ha->iflags |= IFLG_INTR_FIXED;

	/* Allocate space for interrupt handles (KM_SLEEP cannot fail). */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Allocate the interrupts; STRICT demands all 'count' vectors. */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
	    actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority (needed by ql_init_mutex() below). */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; vector index passed as arg2. */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
			EL(ha, "failed, intr_add ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/* Setup mutexes */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Enable interrupts */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	EL(ha, "using FIXED interupts\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16749 
16750 /*
16751  * ql_disable_intr
16752  *	Disables interrupts
16753  *
16754  * Input:
16755  *	ha = adapter state pointer.
16756  *
16757  * Returns:
16758  *
16759  * Context:
16760  *	Kernel context.
16761  */
static void
ql_disable_intr(ql_adapter_state_t *ha)
{
	uint32_t	i, rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(ha->iflags & IFLG_INTR_AIF)) {

		/* Disable legacy interrupts */
		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);

	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {

		/* Remove AIF block interrupts (MSI) */
		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
		    != DDI_SUCCESS) {
			/* Log only; teardown continues regardless. */
			EL(ha, "failed intr block disable, rval=%x\n", rval);
		}

	} else {

		/* Remove AIF non-block interrupts (fixed).  */
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((rval = ddi_intr_disable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				/* Log only; keep disabling the rest. */
				EL(ha, "failed intr disable, intr#=%xh, "
				    "rval=%xh\n", i, rval);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16797 
16798 /*
16799  * ql_release_intr
16800  *	Releases aif legacy interrupt resources
16801  *
16802  * Input:
16803  *	ha = adapter state pointer.
16804  *
16805  * Returns:
16806  *
16807  * Context:
16808  *	Kernel context.
16809  */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t 	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing to release for legacy (non-AIF) interrupts. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/* Walk the handle table backwards (hsize / handle size). */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/*
			 * Only the first intr_cnt slots had handlers added;
			 * slots above that (sparc dup'd vectors) have none.
			 */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all interrupt bookkeeping to pristine state. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16851 
16852 /*
16853  * ql_legacy_intr
16854  *	Sets up legacy interrupts.
16855  *
16856  *	NB: Only to be used if AIF (Advanced Interupt Framework)
16857  *	    if NOT in the kernel.
16858  *
16859  * Input:
16860  *	ha = adapter state pointer.
16861  *
16862  * Returns:
16863  *	DDI_SUCCESS or DDI_FAILURE.
16864  *
16865  * Context:
16866  *	Kernel context.
16867  */
16868 static int
16869 ql_legacy_intr(ql_adapter_state_t *ha)
16870 {
16871 	int	rval = DDI_SUCCESS;
16872 
16873 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16874 
16875 	/* Setup mutexes */
16876 	if (ql_init_mutex(ha) != DDI_SUCCESS) {
16877 		EL(ha, "failed, mutex init\n");
16878 		return (DDI_FAILURE);
16879 	}
16880 
16881 	/* Setup standard/legacy interrupt handler */
16882 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16883 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16884 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16885 		    QL_NAME, ha->instance);
16886 		ql_destroy_mutex(ha);
16887 		rval = DDI_FAILURE;
16888 	}
16889 
16890 	if (rval == DDI_SUCCESS) {
16891 		ha->iflags |= IFLG_INTR_LEGACY;
16892 		EL(ha, "using legacy interrupts\n");
16893 	}
16894 
16895 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16896 
16897 	return (rval);
16898 }
16899 
16900 /*
16901  * ql_init_mutex
16902  *	Initializes mutex's
16903  *
16904  * Input:
16905  *	ha = adapter state pointer.
16906  *
16907  * Returns:
16908  *	DDI_SUCCESS or DDI_FAILURE.
16909  *
16910  * Context:
16911  *	Kernel context.
16912  */
16913 static int
16914 ql_init_mutex(ql_adapter_state_t *ha)
16915 {
16916 	int	ret;
16917 	void	*intr;
16918 
16919 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16920 
16921 	if (ha->iflags & IFLG_INTR_AIF) {
16922 		intr = (void *)(uintptr_t)ha->intr_pri;
16923 	} else {
16924 		/* Get iblock cookies to initialize mutexes */
16925 		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
16926 		    &ha->iblock_cookie)) != DDI_SUCCESS) {
16927 			EL(ha, "failed, get_iblock: %xh\n", ret);
16928 			return (DDI_FAILURE);
16929 		}
16930 		intr = (void *)ha->iblock_cookie;
16931 	}
16932 
16933 	/* mutexes to protect the adapter state structure. */
16934 	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);
16935 
16936 	/* mutex to protect the ISP response ring. */
16937 	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);
16938 
16939 	/* mutex to protect the mailbox registers. */
16940 	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);
16941 
16942 	/* power management protection */
16943 	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);
16944 
16945 	/* Mailbox wait and interrupt conditional variable. */
16946 	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
16947 	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
16948 
16949 	/* mutex to protect the ISP request ring. */
16950 	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);
16951 
16952 	/* Unsolicited buffer conditional variable. */
16953 	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
16954 
16955 	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
16956 	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);
16957 
16958 	/* Suspended conditional variable. */
16959 	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
16960 
16961 	/* mutex to protect task daemon context. */
16962 	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);
16963 
16964 	/* Task_daemon thread conditional variable. */
16965 	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
16966 
16967 	/* mutex to protect diag port manage interface */
16968 	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);
16969 
16970 	/* mutex to protect per instance f/w dump flags and buffer */
16971 	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);
16972 
16973 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16974 
16975 	return (DDI_SUCCESS);
16976 }
16977 
16978 /*
16979  * ql_destroy_mutex
16980  *	Destroys mutex's
16981  *
16982  * Input:
16983  *	ha = adapter state pointer.
16984  *
16985  * Returns:
16986  *
16987  * Context:
16988  *	Kernel context.
16989  */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Tear down in reverse order of initialization in ql_init_mutex(). */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
17013 
17014 /*
17015  * ql_fwmodule_resolve
17016  *	Loads and resolves external firmware module and symbols
17017  *
17018  * Input:
17019  *	ha:		adapter state pointer.
17020  *
17021  * Returns:
17022  *	ql local function return status code:
17023  *		QL_SUCCESS - external f/w module module and symbols resolved
17024  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
17025  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
17026  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
17027  * Context:
17028  *	Kernel context.
17029  *
17030  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
17031  * could switch to a tighter scope around acutal download (and add an extra
17032  * ddi_modopen for module opens that occur before root is mounted).
17033  *
17034  */
17035 uint32_t
17036 ql_fwmodule_resolve(ql_adapter_state_t *ha)
17037 {
17038 	int8_t			module[128];
17039 	int8_t			fw_version[128];
17040 	uint32_t		rval = QL_SUCCESS;
17041 	caddr_t			code, code02;
17042 	uint8_t			*p_ucfw;
17043 	uint16_t		*p_usaddr, *p_uslen;
17044 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
17045 	uint32_t		*p_uiaddr02, *p_uilen02;
17046 	struct fw_table		*fwt;
17047 	extern struct fw_table	fw_table[];
17048 
17049 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17050 
17051 	if (ha->fw_module != NULL) {
17052 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
17053 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
17054 		    ha->fw_subminor_version);
17055 		return (rval);
17056 	}
17057 
17058 	/* make sure the fw_class is in the fw_table of supported classes */
17059 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
17060 		if (fwt->fw_class == ha->fw_class)
17061 			break;			/* match */
17062 	}
17063 	if (fwt->fw_version == NULL) {
17064 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
17065 		    "in driver's fw_table", QL_NAME, ha->instance,
17066 		    ha->fw_class);
17067 		return (QL_FW_NOT_SUPPORTED);
17068 	}
17069 
17070 	/*
17071 	 * open the module related to the fw_class
17072 	 */
17073 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
17074 	    ha->fw_class);
17075 
17076 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
17077 	if (ha->fw_module == NULL) {
17078 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
17079 		    QL_NAME, ha->instance, module);
17080 		return (QL_FWMODLOAD_FAILED);
17081 	}
17082 
17083 	/*
17084 	 * resolve the fw module symbols, data types depend on fw_class
17085 	 */
17086 
17087 	switch (ha->fw_class) {
17088 	case 0x2200:
17089 	case 0x2300:
17090 	case 0x6322:
17091 
17092 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17093 		    NULL)) == NULL) {
17094 			rval = QL_FWSYM_NOT_FOUND;
17095 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17096 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
17097 		    "risc_code_addr01", NULL)) == NULL) {
17098 			rval = QL_FWSYM_NOT_FOUND;
17099 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17100 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
17101 		    "risc_code_length01", NULL)) == NULL) {
17102 			rval = QL_FWSYM_NOT_FOUND;
17103 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17104 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
17105 		    "firmware_version", NULL)) == NULL) {
17106 			rval = QL_FWSYM_NOT_FOUND;
17107 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
17108 		}
17109 
17110 		if (rval == QL_SUCCESS) {
17111 			ha->risc_fw[0].code = code;
17112 			ha->risc_fw[0].addr = *p_usaddr;
17113 			ha->risc_fw[0].length = *p_uslen;
17114 
17115 			(void) snprintf(fw_version, sizeof (fw_version),
17116 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
17117 		}
17118 		break;
17119 
17120 	case 0x2400:
17121 	case 0x2500:
17122 	case 0x8100:
17123 
17124 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17125 		    NULL)) == NULL) {
17126 			rval = QL_FWSYM_NOT_FOUND;
17127 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17128 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
17129 		    "risc_code_addr01", NULL)) == NULL) {
17130 			rval = QL_FWSYM_NOT_FOUND;
17131 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17132 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
17133 		    "risc_code_length01", NULL)) == NULL) {
17134 			rval = QL_FWSYM_NOT_FOUND;
17135 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17136 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
17137 		    "firmware_version", NULL)) == NULL) {
17138 			rval = QL_FWSYM_NOT_FOUND;
17139 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
17140 		}
17141 
17142 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
17143 		    NULL)) == NULL) {
17144 			rval = QL_FWSYM_NOT_FOUND;
17145 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
17146 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
17147 		    "risc_code_addr02", NULL)) == NULL) {
17148 			rval = QL_FWSYM_NOT_FOUND;
17149 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
17150 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
17151 		    "risc_code_length02", NULL)) == NULL) {
17152 			rval = QL_FWSYM_NOT_FOUND;
17153 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
17154 		}
17155 
17156 		if (rval == QL_SUCCESS) {
17157 			ha->risc_fw[0].code = code;
17158 			ha->risc_fw[0].addr = *p_uiaddr;
17159 			ha->risc_fw[0].length = *p_uilen;
17160 			ha->risc_fw[1].code = code02;
17161 			ha->risc_fw[1].addr = *p_uiaddr02;
17162 			ha->risc_fw[1].length = *p_uilen02;
17163 
17164 			(void) snprintf(fw_version, sizeof (fw_version),
17165 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
17166 		}
17167 		break;
17168 
17169 	default:
17170 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
17171 		rval = QL_FW_NOT_SUPPORTED;
17172 	}
17173 
17174 	if (rval != QL_SUCCESS) {
17175 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
17176 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
17177 		if (ha->fw_module != NULL) {
17178 			(void) ddi_modclose(ha->fw_module);
17179 			ha->fw_module = NULL;
17180 		}
17181 	} else {
17182 		/*
17183 		 * check for firmware version mismatch between module and
17184 		 * compiled in fw_table version.
17185 		 */
17186 
17187 		if (strcmp(fwt->fw_version, fw_version) != 0) {
17188 
17189 			/*
17190 			 * If f/w / driver version mismatches then
17191 			 * return a successful status -- however warn
17192 			 * the user that this is NOT recommended.
17193 			 */
17194 
17195 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
17196 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
17197 			    ha->instance, ha->fw_class, fwt->fw_version,
17198 			    fw_version);
17199 
17200 			ha->cfg_flags |= CFG_FW_MISMATCH;
17201 		} else {
17202 			ha->cfg_flags &= ~CFG_FW_MISMATCH;
17203 		}
17204 	}
17205 
17206 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17207 
17208 	return (rval);
17209 }
17210 
17211 /*
17212  * ql_port_state
17213  *	Set the state on all adapter ports.
17214  *
17215  * Input:
17216  *	ha:	parent adapter state pointer.
17217  *	state:	port state.
17218  *	flags:	task daemon flags to set.
17219  *
17220  * Context:
17221  *	Interrupt or Kernel context, no mailbox commands allowed.
17222  */
17223 void
17224 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
17225 {
17226 	ql_adapter_state_t	*vha;
17227 
17228 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17229 
17230 	TASK_DAEMON_LOCK(ha);
17231 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
17232 		if (FC_PORT_STATE_MASK(vha->state) != state) {
17233 			vha->state = state != FC_STATE_OFFLINE ?
17234 			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
17235 			vha->task_daemon_flags |= flags;
17236 		}
17237 	}
17238 	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
17239 	TASK_DAEMON_UNLOCK(ha);
17240 
17241 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17242 }
17243 
17244 /*
17245  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
17246  *
17247  * Input:	Pointer to the adapter state structure.
17248  * Returns:	Success or Failure.
17249  * Context:	Kernel context.
17250  */
17251 int
17252 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
17253 {
17254 	int	rval = DDI_SUCCESS;
17255 
17256 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17257 
17258 	ha->el_trace_desc =
17259 	    (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
17260 
17261 	if (ha->el_trace_desc == NULL) {
17262 		cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
17263 		    QL_NAME, ha->instance);
17264 		rval = DDI_FAILURE;
17265 	} else {
17266 		ha->el_trace_desc->next		= 0;
17267 		ha->el_trace_desc->trace_buffer =
17268 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
17269 
17270 		if (ha->el_trace_desc->trace_buffer == NULL) {
17271 			cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
17272 			    QL_NAME, ha->instance);
17273 			kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17274 			rval = DDI_FAILURE;
17275 		} else {
17276 			ha->el_trace_desc->trace_buffer_size =
17277 			    EL_TRACE_BUF_SIZE;
17278 			mutex_init(&ha->el_trace_desc->mutex, NULL,
17279 			    MUTEX_DRIVER, NULL);
17280 		}
17281 	}
17282 
17283 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17284 
17285 	return (rval);
17286 }
17287 
17288 /*
17289  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
17290  *
17291  * Input:	Pointer to the adapter state structure.
17292  * Returns:	Success or Failure.
17293  * Context:	Kernel context.
17294  */
17295 int
17296 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
17297 {
17298 	int	rval = DDI_SUCCESS;
17299 
17300 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17301 
17302 	if (ha->el_trace_desc == NULL) {
17303 		cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17304 		    QL_NAME, ha->instance);
17305 		rval = DDI_FAILURE;
17306 	} else {
17307 		if (ha->el_trace_desc->trace_buffer != NULL) {
17308 			kmem_free(ha->el_trace_desc->trace_buffer,
17309 			    ha->el_trace_desc->trace_buffer_size);
17310 		}
17311 		mutex_destroy(&ha->el_trace_desc->mutex);
17312 		kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17313 	}
17314 
17315 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17316 
17317 	return (rval);
17318 }
17319 
17320 /*
17321  * els_cmd_text	- Return a pointer to a string describing the command
17322  *
17323  * Input:	els_cmd = the els command opcode.
17324  * Returns:	pointer to a string.
17325  * Context:	Kernel context.
17326  */
17327 char *
17328 els_cmd_text(int els_cmd)
17329 {
17330 	cmd_table_t *entry = &els_cmd_tbl[0];
17331 
17332 	return (cmd_text(entry, els_cmd));
17333 }
17334 
17335 /*
17336  * mbx_cmd_text - Return a pointer to a string describing the command
17337  *
17338  * Input:	mbx_cmd = the mailbox command opcode.
17339  * Returns:	pointer to a string.
17340  * Context:	Kernel context.
17341  */
17342 char *
17343 mbx_cmd_text(int mbx_cmd)
17344 {
17345 	cmd_table_t *entry = &mbox_cmd_tbl[0];
17346 
17347 	return (cmd_text(entry, mbx_cmd));
17348 }
17349 
17350 /*
17351  * cmd_text	Return a pointer to a string describing the command
17352  *
17353  * Input:	entry = the command table
17354  *		cmd = the command.
17355  * Returns:	pointer to a string.
17356  * Context:	Kernel context.
17357  */
17358 char *
17359 cmd_text(cmd_table_t *entry, int cmd)
17360 {
17361 	for (; entry->cmd != 0; entry++) {
17362 		if (entry->cmd == cmd) {
17363 			break;
17364 		}
17365 	}
17366 	return (entry->string);
17367 }
17368 
17369 /*
17370  * ql_els_24xx_mbox_cmd_iocb - els request indication.
17371  *
17372  * Input:	ha = adapter state pointer.
17373  *		srb = scsi request block pointer.
17374  *		arg = els passthru entry iocb pointer.
17375  * Returns:
17376  * Context:	Kernel context.
17377  */
17378 void
17379 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
17380 {
17381 	els_descriptor_t	els_desc;
17382 
17383 	/* Extract the ELS information */
17384 	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
17385 
17386 	/* Construct the passthru entry */
17387 	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
17388 
17389 	/* Ensure correct endianness */
17390 	ql_isp_els_handle_cmd_endian(ha, srb);
17391 }
17392 
17393 /*
17394  * ql_fca_isp_els_request - Extract into an els descriptor the info required
17395  *			    to build an els_passthru iocb from an fc packet.
17396  *
17397  * Input:	ha = adapter state pointer.
17398  *		pkt = fc packet pointer
17399  *		els_desc = els descriptor pointer
17400  * Returns:
17401  * Context:	Kernel context.
17402  */
static void
ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
    els_descriptor_t *els_desc)
{
	ls_code_t	els;

	/* Pull the ELS command word out of the mapped command buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	els_desc->els = els.ls_code;

	els_desc->els_handle = ha->hba_buf.acc_handle;
	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
	/* if n_port_handle is not < 0x7d use 0 */
	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
		els_desc->n_port_handle = ha->n_port->n_port_handle;
	} else {
		els_desc->n_port_handle = 0;
	}
	els_desc->control_flags = 0;
	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
	/*
	 * Transmit DSD. This field defines the Fibre Channel Frame payload
	 * (without the frame header) in system memory.
	 */
	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;

	els_desc->rsp_byte_count = pkt->pkt_rsplen;
	/*
	 * Receive DSD. This field defines the ELS response payload buffer
	 * for the ISP24xx firmware transferring the received ELS
	 * response frame to a location in host memory.
	 */
	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
}
17443 
17444 /*
17445  * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17446  * using the els descriptor.
17447  *
17448  * Input:	ha = adapter state pointer.
17449  *		els_desc = els descriptor pointer.
17450  *		els_entry = els passthru entry iocb pointer.
17451  * Returns:
17452  * Context:	Kernel context.
17453  */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t	*ptr32;

	/*
	 * Construct command packet.  All stores go through ddi_put*() on
	 * the els_handle so the IOCB lands in device byte order.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* Destination and source port IDs, one byte field at a time. */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/*
	 * Load transmit data segments and count.
	 * NOTE(review): ptr32 keeps advancing past the xmt DSD words into
	 * the receive DSD words -- this assumes the rcv dseg address/length
	 * fields immediately follow xmt_dseg_0_address in the IOCB layout.
	 */
	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
17501 
17502 /*
17503  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17504  *				  in host memory.
17505  *
17506  * Input:	ha = adapter state pointer.
17507  *		srb = scsi request block
17508  * Returns:
17509  * Context:	Kernel context.
17510  */
17511 void
17512 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17513 {
17514 	ls_code_t	els;
17515 	fc_packet_t	*pkt;
17516 	uint8_t		*ptr;
17517 
17518 	pkt = srb->pkt;
17519 
17520 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17521 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17522 
17523 	ptr = (uint8_t *)pkt->pkt_cmd;
17524 
17525 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17526 }
17527 
17528 /*
17529  * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17530  *				  in host memory.
17531  * Input:	ha = adapter state pointer.
17532  *		srb = scsi request block
17533  * Returns:
17534  * Context:	Kernel context.
17535  */
17536 void
17537 ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17538 {
17539 	ls_code_t	els;
17540 	fc_packet_t	*pkt;
17541 	uint8_t		*ptr;
17542 
17543 	pkt = srb->pkt;
17544 
17545 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17546 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17547 
17548 	ptr = (uint8_t *)pkt->pkt_resp;
17549 	BIG_ENDIAN_32(&els);
17550 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17551 }
17552 
/*
 * ql_isp_els_handle_endian - els requests/responses must be in big endian
 *			      in host memory.
 *
 * Byte-swaps the multi-byte fields of a PLOGI or PRLI payload in place,
 * walking the buffer field by field.  The BIG_ENDIAN_16/32 macros swap
 * the bytes at the given address; WWNs and class-parameter blocks that
 * are plain byte arrays are skipped with bare pointer increments.  The
 * trailing comment on each "ptr +=" line names the field ptr points to
 * AFTER the increment.  Unknown ELS codes are logged and left untouched.
 *
 * Input:	ha = adapter state pointer.
 *		ptr = els request/response buffer pointer.
 *		ls_code = els command code.
 * Returns:
 * Context:	Kernel context.
 */
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		/* Common service parameters (16 bytes). */
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name */
		/* WWNs and class 1/2 params are byte arrays - no swap. */
		ptr += 8;		/* Node Name */
		ptr += 8;		/* Class 1 */
		ptr += 16;		/* Class 2 */
		ptr += 16;		/* Class 3 */
		/* Class 3 service parameters. */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator  */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
17620 
17621 /*
17622  * ql_n_port_plogi
17623  *	In N port 2 N port topology where an N Port has logged in with the
17624  *	firmware because it has the N_Port login initiative, we send up
17625  *	a plogi by proxy which stimulates the login procedure to continue.
17626  *
17627  * Input:
17628  *	ha = adapter state pointer.
17629  * Returns:
17630  *
17631  * Context:
17632  *	Kernel context.
17633  */
17634 static int
17635 ql_n_port_plogi(ql_adapter_state_t *ha)
17636 {
17637 	int		rval;
17638 	ql_tgt_t	*tq;
17639 	ql_head_t done_q = { NULL, NULL };
17640 
17641 	rval = QL_SUCCESS;
17642 
17643 	if (ha->topology & QL_N_PORT) {
17644 		/* if we're doing this the n_port_handle must be good */
17645 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17646 			tq = ql_loop_id_to_queue(ha,
17647 			    ha->n_port->n_port_handle);
17648 			if (tq != NULL) {
17649 				(void) ql_send_plogi(ha, tq, &done_q);
17650 			} else {
17651 				EL(ha, "n_port_handle = %x, tq = %x\n",
17652 				    ha->n_port->n_port_handle, tq);
17653 			}
17654 		} else {
17655 			EL(ha, "n_port_handle = %x, tq = %x\n",
17656 			    ha->n_port->n_port_handle, tq);
17657 		}
17658 		if (done_q.first != NULL) {
17659 			ql_done(done_q.first);
17660 		}
17661 	}
17662 	return (rval);
17663 }
17664 
17665 /*
17666  * Compare two WWNs. The NAA is omitted for comparison.
17667  *
17668  * Note particularly that the indentation used in this
17669  * function  isn't according to Sun recommendations. It
17670  * is indented to make reading a bit easy.
17671  *
17672  * Return Values:
17673  *   if first == second return  0
17674  *   if first > second  return  1
17675  *   if first < second  return -1
17676  */
17677 int
17678 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17679 {
17680 	la_wwn_t t1, t2;
17681 	int rval;
17682 
17683 	EL(ha, "WWPN=%08x%08x\n",
17684 	    BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17685 	EL(ha, "WWPN=%08x%08x\n",
17686 	    BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17687 	/*
17688 	 * Fibre Channel protocol is big endian, so compare
17689 	 * as big endian values
17690 	 */
17691 	t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17692 	t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17693 
17694 	t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17695 	t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17696 
17697 	if (t1.i_wwn[0] == t2.i_wwn[0]) {
17698 		if (t1.i_wwn[1] == t2.i_wwn[1]) {
17699 			rval = 0;
17700 		} else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17701 			rval = 1;
17702 		} else {
17703 			rval = -1;
17704 		}
17705 	} else {
17706 		if (t1.i_wwn[0] > t2.i_wwn[0]) {
17707 			rval = 1;
17708 		} else {
17709 			rval = -1;
17710 		}
17711 	}
17712 	return (rval);
17713 }
17714 
17715 /*
17716  * ql_wait_for_td_stop
17717  *	Wait for task daemon to stop running.  Internal command timeout
17718  *	is approximately 30 seconds, so it may help in some corner
17719  *	cases to wait that long
17720  *
17721  * Input:
17722  *	ha = adapter state pointer.
17723  *
17724  * Returns:
17725  *	DDI_SUCCESS or DDI_FAILURE.
17726  *
17727  * Context:
17728  *	Kernel context.
17729  */
17730 
17731 static int
17732 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17733 {
17734 	int	rval = DDI_FAILURE;
17735 	UINT16	wait_cnt;
17736 
17737 	for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17738 		/* The task daemon clears the stop flag on exit. */
17739 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17740 			if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17741 			    ddi_in_panic()) {
17742 				drv_usecwait(10000);
17743 			} else {
17744 				delay(drv_usectohz(10000));
17745 			}
17746 		} else {
17747 			rval = DDI_SUCCESS;
17748 			break;
17749 		}
17750 	}
17751 	return (rval);
17752 }
17753 
17754 /*
17755  * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17756  *
17757  * Input:	Pointer to the adapter state structure.
17758  * Returns:	Success or Failure.
17759  * Context:	Kernel context.
17760  */
17761 int
17762 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
17763 {
17764 	int	rval = DDI_SUCCESS;
17765 
17766 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17767 
17768 	ha->nvram_cache =
17769 	    (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
17770 	    KM_SLEEP);
17771 
17772 	if (ha->nvram_cache == NULL) {
17773 		cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
17774 		    " descriptor", QL_NAME, ha->instance);
17775 		rval = DDI_FAILURE;
17776 	} else {
17777 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
17778 			ha->nvram_cache->size = sizeof (nvram_24xx_t);
17779 		} else {
17780 			ha->nvram_cache->size = sizeof (nvram_t);
17781 		}
17782 		ha->nvram_cache->cache =
17783 		    (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
17784 		if (ha->nvram_cache->cache == NULL) {
17785 			cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
17786 			    QL_NAME, ha->instance);
17787 			kmem_free(ha->nvram_cache,
17788 			    sizeof (nvram_cache_desc_t));
17789 			ha->nvram_cache = 0;
17790 			rval = DDI_FAILURE;
17791 		} else {
17792 			mutex_init(&ha->nvram_cache->mutex, NULL,
17793 			    MUTEX_DRIVER, NULL);
17794 			ha->nvram_cache->valid = 0;
17795 		}
17796 	}
17797 
17798 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17799 
17800 	return (rval);
17801 }
17802 
17803 /*
17804  * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17805  *
17806  * Input:	Pointer to the adapter state structure.
17807  * Returns:	Success or Failure.
17808  * Context:	Kernel context.
17809  */
17810 int
17811 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17812 {
17813 	int	rval = DDI_SUCCESS;
17814 
17815 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17816 
17817 	if (ha->nvram_cache == NULL) {
17818 		cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17819 		    QL_NAME, ha->instance);
17820 		rval = DDI_FAILURE;
17821 	} else {
17822 		if (ha->nvram_cache->cache != NULL) {
17823 			kmem_free(ha->nvram_cache->cache,
17824 			    ha->nvram_cache->size);
17825 		}
17826 		mutex_destroy(&ha->nvram_cache->mutex);
17827 		kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17828 	}
17829 
17830 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17831 
17832 	return (rval);
17833 }
17834 
17835 /*
17836  * ql_process_idc_event - Handle an Inter-Driver Communication async event.
17837  *
17838  * Input:	Pointer to the adapter state structure.
17839  * Returns:	void
17840  * Context:	Kernel context.
17841  */
17842 static void
17843 ql_process_idc_event(ql_adapter_state_t *ha)
17844 {
17845 	int	rval;
17846 
17847 	switch (ha->idc_mb[0]) {
17848 	case MBA_IDC_NOTIFICATION:
17849 		/*
17850 		 * The informational opcode (idc_mb[2]) can be a
17851 		 * defined value or the mailbox command being executed
17852 		 * on another function which stimulated this IDC message.
17853 		 */
17854 		ADAPTER_STATE_LOCK(ha);
17855 		switch (ha->idc_mb[2]) {
17856 		case IDC_OPC_DRV_START:
17857 			if (ha->idc_flash_acc != 0) {
17858 				ha->idc_flash_acc--;
17859 				if (ha->idc_flash_acc == 0) {
17860 					ha->idc_flash_acc_timer = 0;
17861 					GLOBAL_HW_UNLOCK();
17862 				}
17863 			}
17864 			if (ha->idc_restart_cnt != 0) {
17865 				ha->idc_restart_cnt--;
17866 				if (ha->idc_restart_cnt == 0) {
17867 					ha->idc_restart_timer = 0;
17868 					ADAPTER_STATE_UNLOCK(ha);
17869 					TASK_DAEMON_LOCK(ha);
17870 					ha->task_daemon_flags &= ~DRIVER_STALL;
17871 					TASK_DAEMON_UNLOCK(ha);
17872 					ql_restart_queues(ha);
17873 				} else {
17874 					ADAPTER_STATE_UNLOCK(ha);
17875 				}
17876 			} else {
17877 				ADAPTER_STATE_UNLOCK(ha);
17878 			}
17879 			break;
17880 		case IDC_OPC_FLASH_ACC:
17881 			ha->idc_flash_acc_timer = 30;
17882 			if (ha->idc_flash_acc == 0) {
17883 				GLOBAL_HW_LOCK();
17884 			}
17885 			ha->idc_flash_acc++;
17886 			ADAPTER_STATE_UNLOCK(ha);
17887 			break;
17888 		case IDC_OPC_RESTART_MPI:
17889 			ha->idc_restart_timer = 30;
17890 			ha->idc_restart_cnt++;
17891 			ADAPTER_STATE_UNLOCK(ha);
17892 			TASK_DAEMON_LOCK(ha);
17893 			ha->task_daemon_flags |= DRIVER_STALL;
17894 			TASK_DAEMON_UNLOCK(ha);
17895 			break;
17896 		case IDC_OPC_PORT_RESET_MBC:
17897 		case IDC_OPC_SET_PORT_CONFIG_MBC:
17898 			ha->idc_restart_timer = 30;
17899 			ha->idc_restart_cnt++;
17900 			ADAPTER_STATE_UNLOCK(ha);
17901 			TASK_DAEMON_LOCK(ha);
17902 			ha->task_daemon_flags |= DRIVER_STALL;
17903 			TASK_DAEMON_UNLOCK(ha);
17904 			(void) ql_wait_outstanding(ha);
17905 			break;
17906 		default:
17907 			ADAPTER_STATE_UNLOCK(ha);
17908 			EL(ha, "Unknown IDC opcode=%xh %xh\n", ha->idc_mb[0],
17909 			    ha->idc_mb[2]);
17910 			break;
17911 		}
17912 		/*
17913 		 * If there is a timeout value associated with this IDC
17914 		 * notification then there is an implied requirement
17915 		 * that we return an ACK.
17916 		 */
17917 		if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
17918 			rval = ql_idc_ack(ha);
17919 			if (rval != QL_SUCCESS) {
17920 				EL(ha, "idc_ack status=%xh %xh\n", rval,
17921 				    ha->idc_mb[2]);
17922 			}
17923 		}
17924 		break;
17925 	case MBA_IDC_COMPLETE:
17926 		/*
17927 		 * We don't ACK completions, only these require action.
17928 		 */
17929 		switch (ha->idc_mb[2]) {
17930 		case IDC_OPC_PORT_RESET_MBC:
17931 		case IDC_OPC_SET_PORT_CONFIG_MBC:
17932 			ADAPTER_STATE_LOCK(ha);
17933 			if (ha->idc_restart_cnt != 0) {
17934 				ha->idc_restart_cnt--;
17935 				if (ha->idc_restart_cnt == 0) {
17936 					ha->idc_restart_timer = 0;
17937 					ADAPTER_STATE_UNLOCK(ha);
17938 					TASK_DAEMON_LOCK(ha);
17939 					ha->task_daemon_flags &= ~DRIVER_STALL;
17940 					TASK_DAEMON_UNLOCK(ha);
17941 					ql_restart_queues(ha);
17942 				} else {
17943 					ADAPTER_STATE_UNLOCK(ha);
17944 				}
17945 			} else {
17946 				ADAPTER_STATE_UNLOCK(ha);
17947 			}
17948 			break;
17949 		default:
17950 			break; /* Don't care... */
17951 		}
17952 		break;
17953 	case MBA_IDC_TIME_EXTENDED:
17954 		QL_PRINT_10(CE_CONT, "(%d): MBA_IDC_TIME_EXTENDED="
17955 		    "%xh\n", ha->instance, ha->idc_mb[2]);
17956 		break;
17957 	default:
17958 		EL(ha, "Inconsistent IDC event =%xh %xh\n", ha->idc_mb[0],
17959 		    ha->idc_mb[2]);
17960 		ADAPTER_STATE_UNLOCK(ha);
17961 		break;
17962 	}
17963 }
17964