xref: /illumos-gate/usr/src/uts/common/io/aac/aac.c (revision 56f33205c9ed776c3c909e07d52e94610a675740)
1 /*
2  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright 2005-08 Adaptec, Inc.
8  * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
9  * Copyright (c) 2000 Michael Smith
10  * Copyright (c) 2001 Scott Long
11  * Copyright (c) 2000 BSDi
12  * All rights reserved.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/modctl.h>
36 #include <sys/conf.h>
37 #include <sys/cmn_err.h>
38 #include <sys/ddi.h>
39 #include <sys/devops.h>
40 #include <sys/pci.h>
41 #include <sys/types.h>
42 #include <sys/ddidmareq.h>
43 #include <sys/scsi/scsi.h>
44 #include <sys/ksynch.h>
45 #include <sys/sunddi.h>
46 #include <sys/byteorder.h>
47 #include "aac_regs.h"
48 #include "aac.h"
49 
50 /*
51  * FMA header files
52  */
53 #include <sys/ddifm.h>
54 #include <sys/fm/protocol.h>
55 #include <sys/fm/util.h>
56 #include <sys/fm/io/ddi.h>
57 
58 /*
59  * For minor nodes created by the SCSA framework, minor numbers are
60  * formed by left-shifting the instance number by INST_MINOR_SHIFT and
61  * OR'ing in a number less than 64.
62  *
63  * To support cfgadm, we need to conform to the SCSA framework by
64  * creating devctl/scsi and driver-specific minor nodes in the SCSA
65  * format, and by calling the scsi_hba_xxx() functions accordingly.
66  */
67 
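/*
 * AAC_MINOR (32) is the driver-private minor used below for the "aac" ioctl
 * node; it stays below 64 so it fits in the per-instance minor space
 * described above (and is presumably distinct from DEVCTL_MINOR/SCSI_MINOR).
 */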
68 #define	AAC_MINOR		32
69 #define	INST2AAC(x)		(((x) << INST_MINOR_SHIFT) | AAC_MINOR)
70 #define	AAC_SCSA_MINOR(x)	((x) & TRAN_MINOR_MASK)
71 #define	AAC_IS_SCSA_NODE(x)	((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
72 
73 #define	SD2TRAN(sd)		((sd)->sd_address.a_hba_tran)
74 #define	AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
75 #define	AAC_DIP2TRAN(dip)	((scsi_hba_tran_t *)ddi_get_driver_private(dip))
76 #define	AAC_DIP2SOFTS(dip)	(AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
77 #define	SD2AAC(sd)		(AAC_TRAN2SOFTS(SD2TRAN(sd)))
78 #define	AAC_PD(t)		((t) - AAC_MAX_LD)
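/*
 * Map a target number to its device structure: targets below AAC_MAX_LD are
 * logical containers, the rest (up to AAC_MAX_DEV) are physical (non-dasd)
 * devices, and anything else maps to NULL.
 */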
79 #define	AAC_DEV(softs, t)	(((t) < AAC_MAX_LD) ? \
80 				&(softs)->containers[(t)].dev : \
81 				((t) < AAC_MAX_DEV(softs)) ? \
82 				&(softs)->nondasds[AAC_PD(t)].dev : NULL)
83 #define	AAC_DEVCFG_BEGIN(softs, tgt) \
84 				aac_devcfg((softs), (tgt), 1)
85 #define	AAC_DEVCFG_END(softs, tgt) \
86 				aac_devcfg((softs), (tgt), 0)
87 #define	PKT2AC(pkt)		((struct aac_cmd *)(pkt)->pkt_ha_private)
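/*
 * Busy-wait in 100us steps until cond becomes true or timeout milliseconds
 * elapse; on return, timeout is updated to the (rounded-up) number of
 * milliseconds left, so 0 means the wait expired.
 */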
88 #define	AAC_BUSYWAIT(cond, timeout /* in milliseconds */) { \
89 		if (!(cond)) { \
90 			int count = (timeout) * 10; \
91 			while (count) { \
92 				drv_usecwait(100); \
93 				if (cond) \
94 					break; \
95 				count--; \
96 			} \
97 			(timeout) = (count + 9) / 10; \
98 		} \
99 	}
100 
101 #define	AAC_SENSE_DATA_DESCR_LEN \
102 	(sizeof (struct scsi_descr_sense_hdr) + \
103 	sizeof (struct scsi_information_sense_descr))
104 #define	AAC_ARQ64_LENGTH \
105 	(sizeof (struct scsi_arq_status) + \
106 	AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)
107 
108 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
109 #define	AAC_GETGXADDR(cmdlen, cdbp) \
110 	((cmdlen == 6) ? GETG0ADDR(cdbp) : \
111 	(cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
112 	((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))
113 
114 #define	AAC_CDB_INQUIRY_CMDDT	0x02
115 #define	AAC_CDB_INQUIRY_EVPD	0x01
116 #define	AAC_VPD_PAGE_CODE	1
117 #define	AAC_VPD_PAGE_LENGTH	3
118 #define	AAC_VPD_PAGE_DATA	4
119 #define	AAC_VPD_ID_CODESET	0
120 #define	AAC_VPD_ID_TYPE		1
121 #define	AAC_VPD_ID_LENGTH	3
122 #define	AAC_VPD_ID_DATA		4
123 
124 #define	AAC_SCSI_RPTLUNS_HEAD_SIZE			0x08
125 #define	AAC_SCSI_RPTLUNS_ADDR_SIZE			0x08
126 #define	AAC_SCSI_RPTLUNS_ADDR_MASK			0xC0
127 /* 00b - peripheral device addressing method */
128 #define	AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL		0x00
129 /* 01b - flat space addressing method */
130 #define	AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE		0x40
131 /* 10b - logical unit addressing method */
132 #define	AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT		0x80
133 
134 /* Return the size of a FIB whose data part is of type data_type */
135 #define	AAC_FIB_SIZEOF(data_type) \
136 	(sizeof (struct aac_fib_header) + sizeof (data_type))
137 /* Return the container capacity reported in mir */
138 #define	AAC_MIR_SIZE(softs, acc, mir) \
139 	(((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
140 	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
141 	((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
142 	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
143 
144 /* The last entry of aac_cards[] is for unknown cards */
145 #define	AAC_UNKNOWN_CARD \
146 	(sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
147 #define	CARD_IS_UNKNOWN(i)	(i == AAC_UNKNOWN_CARD)
148 #define	BUF_IS_READ(bp)		((bp)->b_flags & B_READ)
149 #define	AAC_IS_Q_EMPTY(q)	((q)->q_head == NULL)
150 #define	AAC_CMDQ(acp)		(!((acp)->flags & AAC_CMD_SYNC))
151 
152 #define	PCI_MEM_GET32(softs, off) \
153 	ddi_get32((softs)->pci_mem_handle, \
154 	    (void *)((softs)->pci_mem_base_vaddr + (off)))
155 #define	PCI_MEM_PUT32(softs, off, val) \
156 	ddi_put32((softs)->pci_mem_handle, \
157 	    (void *)((softs)->pci_mem_base_vaddr + (off)), \
158 	    (uint32_t)(val))
159 #define	PCI_MEM_GET16(softs, off) \
160 	ddi_get16((softs)->pci_mem_handle, \
161 	(void *)((softs)->pci_mem_base_vaddr + (off)))
162 #define	PCI_MEM_PUT16(softs, off, val) \
163 	ddi_put16((softs)->pci_mem_handle, \
164 	(void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
165 /* Copy count bytes of host data starting at valp to device mem[off] */
166 #define	PCI_MEM_REP_PUT8(softs, off, valp, count) \
167 	ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
168 	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
169 	    count, DDI_DEV_AUTOINCR)
170 /* Copy count bytes of device data at mem[off] to host memory at valp */
171 #define	PCI_MEM_REP_GET8(softs, off, valp, count) \
172 	ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
173 	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
174 	    count, DDI_DEV_AUTOINCR)
175 #define	AAC_GET_FIELD8(acc, d, s, field) \
176 	(d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
177 #define	AAC_GET_FIELD32(acc, d, s, field) \
178 	(d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
179 #define	AAC_GET_FIELD64(acc, d, s, field) \
180 	(d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
181 #define	AAC_REP_GET_FIELD8(acc, d, s, field, r) \
182 	ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
183 	    (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
184 #define	AAC_REP_GET_FIELD32(acc, d, s, field, r) \
185 	ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
186 	    (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
187 
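/*
 * A set bit in the outbound interrupt mask register (OIMR) masks the
 * corresponding doorbell interrupt, so enabling writes the complement of
 * the doorbells in use and disabling (below) writes ~0.
 */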
188 #define	AAC_ENABLE_INTR(softs) { \
189 		if (softs->flags & AAC_FLAGS_NEW_COMM) \
190 			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
191 		else \
192 			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
193 	}
194 
195 #define	AAC_DISABLE_INTR(softs)		PCI_MEM_PUT32(softs, AAC_OIMR, ~0)
196 #define	AAC_STATUS_CLR(softs, mask)	PCI_MEM_PUT32(softs, AAC_ODBR, mask)
197 #define	AAC_STATUS_GET(softs)		PCI_MEM_GET32(softs, AAC_ODBR)
198 #define	AAC_NOTIFY(softs, val)		PCI_MEM_PUT32(softs, AAC_IDBR, val)
199 #define	AAC_OUTB_GET(softs)		PCI_MEM_GET32(softs, AAC_OQUE)
200 #define	AAC_OUTB_SET(softs, val)	PCI_MEM_PUT32(softs, AAC_OQUE, val)
201 #define	AAC_FWSTATUS_GET(softs)	\
202 	((softs)->aac_if.aif_get_fwstatus(softs))
203 #define	AAC_MAILBOX_GET(softs, mb) \
204 	((softs)->aac_if.aif_get_mailbox((softs), (mb)))
205 #define	AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
206 	((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
207 	    (arg0), (arg1), (arg2), (arg3)))
208 
209 #define	AAC_MGT_SLOT_NUM	2
210 #define	AAC_THROTTLE_DRAIN	-1
211 
212 #define	AAC_QUIESCE_TICK	1	/* 1 second */
213 #define	AAC_QUIESCE_TIMEOUT	180	/* 180 seconds */
214 #define	AAC_DEFAULT_TICK	10	/* 10 seconds */
215 #define	AAC_SYNC_TICK		(30*60)	/* 30 minutes */
216 
217 /* Poll time for aac_do_poll_io() */
218 #define	AAC_POLL_TIME		60	/* 60 seconds */
219 
220 /* IOP reset */
221 #define	AAC_IOP_RESET_SUCCEED		0	/* IOP reset succeeded */
222 #define	AAC_IOP_RESET_FAILED		-1	/* IOP reset failed */
223 #define	AAC_IOP_RESET_ABNORMAL		-2	/* Reset operation abnormal */
224 
225 /*
226  * Hardware access functions
227  */
228 static int aac_rx_get_fwstatus(struct aac_softstate *);
229 static int aac_rx_get_mailbox(struct aac_softstate *, int);
230 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
231     uint32_t, uint32_t, uint32_t);
232 static int aac_rkt_get_fwstatus(struct aac_softstate *);
233 static int aac_rkt_get_mailbox(struct aac_softstate *, int);
234 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
235     uint32_t, uint32_t, uint32_t);
236 
237 /*
238  * SCSA function prototypes
239  */
240 static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
241 static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
242 static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
243 static int aac_quiesce(dev_info_t *);
244 static int aac_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
245 
246 /*
247  * Interrupt handler functions
248  */
249 static int aac_query_intrs(struct aac_softstate *, int);
250 static int aac_add_intrs(struct aac_softstate *);
251 static void aac_remove_intrs(struct aac_softstate *);
252 static int aac_enable_intrs(struct aac_softstate *);
253 static int aac_disable_intrs(struct aac_softstate *);
254 static uint_t aac_intr_old(caddr_t);
255 static uint_t aac_intr_new(caddr_t);
256 static uint_t aac_softintr(caddr_t);
257 
258 /*
259  * Internal functions in attach
260  */
261 static int aac_check_card_type(struct aac_softstate *);
262 static int aac_check_firmware(struct aac_softstate *);
263 static int aac_common_attach(struct aac_softstate *);
264 static void aac_common_detach(struct aac_softstate *);
265 static int aac_probe_containers(struct aac_softstate *);
266 static int aac_alloc_comm_space(struct aac_softstate *);
267 static int aac_setup_comm_space(struct aac_softstate *);
268 static void aac_free_comm_space(struct aac_softstate *);
269 static int aac_hba_setup(struct aac_softstate *);
270 
271 /*
272  * Sync FIB operation functions
273  */
274 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
275     uint32_t, uint32_t, uint32_t, uint32_t *);
276 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);
277 
278 /*
279  * Command queue operation functions
280  */
281 static void aac_cmd_initq(struct aac_cmd_queue *);
282 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
283 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
284 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);
285 
286 /*
287  * FIB queue operation functions
288  */
289 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
290 static int aac_fib_dequeue(struct aac_softstate *, int, int *);
291 
292 /*
293  * Slot operation functions
294  */
295 static int aac_create_slots(struct aac_softstate *);
296 static void aac_destroy_slots(struct aac_softstate *);
297 static void aac_alloc_fibs(struct aac_softstate *);
298 static void aac_destroy_fibs(struct aac_softstate *);
299 static struct aac_slot *aac_get_slot(struct aac_softstate *);
300 static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
301 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
302 static void aac_free_fib(struct aac_slot *);
303 
304 /*
305  * Internal functions
306  */
307 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_cmd *,
308     uint16_t);
309 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
310 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
311 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
312 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
313 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
314 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
315 static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *);
316 static void aac_start_waiting_io(struct aac_softstate *);
317 static void aac_drain_comp_q(struct aac_softstate *);
318 int aac_do_io(struct aac_softstate *, struct aac_cmd *);
319 static int aac_sync_fib_slot_bind(struct aac_softstate *, struct aac_cmd *);
320 static void aac_sync_fib_slot_release(struct aac_softstate *, struct aac_cmd *);
321 static void aac_start_io(struct aac_softstate *, struct aac_cmd *);
322 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
323 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
324 static int aac_send_command(struct aac_softstate *, struct aac_slot *);
325 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *);
326 static int aac_dma_sync_ac(struct aac_cmd *);
327 static int aac_shutdown(struct aac_softstate *);
328 static int aac_reset_adapter(struct aac_softstate *);
329 static int aac_do_quiesce(struct aac_softstate *softs);
330 static int aac_do_unquiesce(struct aac_softstate *softs);
331 static void aac_unhold_bus(struct aac_softstate *, int);
332 static void aac_set_throttle(struct aac_softstate *, struct aac_device *,
333     int, int);
334 
335 /*
336  * Adapter Initiated FIB handling function
337  */
338 static int aac_handle_aif(struct aac_softstate *, struct aac_fib *);
339 
340 /*
341  * Timeout handling function, driven by a timeout(9F) callback
342  */
343 static void aac_daemon(void *);
344 
345 /*
346  * IOCTL interface related functions
347  */
348 static int aac_open(dev_t *, int, int, cred_t *);
349 static int aac_close(dev_t, int, int, cred_t *);
350 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
351 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);
352 
353 /*
354  * FMA Prototypes
355  */
356 static void aac_fm_init(struct aac_softstate *);
357 static void aac_fm_fini(struct aac_softstate *);
358 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
359 int aac_check_acc_handle(ddi_acc_handle_t);
360 int aac_check_dma_handle(ddi_dma_handle_t);
361 void aac_fm_ereport(struct aac_softstate *, char *);
362 
363 /*
364  * Auto enumeration functions
365  */
366 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t);
367 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
368     void *, dev_info_t **);
369 static int aac_dr_event(struct aac_softstate *, int, int, int);
370 
371 #ifdef DEBUG
372 /*
373  * UART	debug output support
374  */
375 
376 #define	AAC_PRINT_BUFFER_SIZE		512
377 #define	AAC_PRINT_TIMEOUT		250	/* 1/4 sec. = 250 msec. */
378 
379 #define	AAC_FW_DBG_STRLEN_OFFSET	0x00
380 #define	AAC_FW_DBG_FLAGS_OFFSET		0x04
381 #define	AAC_FW_DBG_BLED_OFFSET		0x08
382 
383 static int aac_get_fw_debug_buffer(struct aac_softstate *);
384 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
385 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);
386 
387 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
388 static char aac_fmt[] = " %s";
389 static char aac_fmt_header[] = " %s.%d: %s";
390 static kmutex_t aac_prt_mutex;
391 
392 /*
393  * Debug flags to be put into the softstate debug_flags field
394  * when it is initialized
395  */
396 uint32_t aac_debug_flags =
397 /*    AACDB_FLAGS_KERNEL_PRINT | */
398 /*    AACDB_FLAGS_FW_PRINT |	*/
399 /*    AACDB_FLAGS_MISC |	*/
400 /*    AACDB_FLAGS_FUNC1 |	*/
401 /*    AACDB_FLAGS_FUNC2 |	*/
402 /*    AACDB_FLAGS_SCMD |	*/
403 /*    AACDB_FLAGS_AIF |		*/
404 /*    AACDB_FLAGS_FIB |		*/
405 /*    AACDB_FLAGS_IOCTL |	*/
406 0;
407 uint32_t aac_debug_fib_flags =
408 /*    AACDB_FLAGS_FIB_RW |	*/
409 /*    AACDB_FLAGS_FIB_IOCTL |	*/
410 /*    AACDB_FLAGS_FIB_SRB |	*/
411 /*    AACDB_FLAGS_FIB_SYNC |	*/
412 /*    AACDB_FLAGS_FIB_HEADER |	*/
413 /*    AACDB_FLAGS_FIB_TIMEOUT |	*/
414 0;
415 
416 #endif /* DEBUG */
417 
418 static struct cb_ops aac_cb_ops = {
419 	aac_open,	/* open */
420 	aac_close,	/* close */
421 	nodev,		/* strategy */
422 	nodev,		/* print */
423 	nodev,		/* dump */
424 	nodev,		/* read */
425 	nodev,		/* write */
426 	aac_ioctl,	/* ioctl */
427 	nodev,		/* devmap */
428 	nodev,		/* mmap */
429 	nodev,		/* segmap */
430 	nochpoll,	/* poll */
431 	ddi_prop_op,	/* cb_prop_op */
432 	NULL,		/* streamtab */
433 	D_64BIT | D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
434 	CB_REV,		/* cb_rev */
435 	nodev,		/* async I/O read entry point */
436 	nodev		/* async I/O write entry point */
437 };
438 
439 static struct dev_ops aac_dev_ops = {
440 	DEVO_REV,
441 	0,
442 	aac_getinfo,
443 	nulldev,
444 	nulldev,
445 	aac_attach,
446 	aac_detach,
447 	aac_reset,
448 	&aac_cb_ops,
449 	NULL,
450 	NULL,
451 	aac_quiesce,
452 };
453 
454 static struct modldrv aac_modldrv = {
455 	&mod_driverops,
456 	"AAC Driver " AAC_DRIVER_VERSION,
457 	&aac_dev_ops,
458 };
459 
460 static struct modlinkage aac_modlinkage = {
461 	MODREV_1,
462 	&aac_modldrv,
463 	NULL
464 };
465 
466 static struct aac_softstate  *aac_softstatep;
467 
468 /*
469  * Supported card list
470  * ordered by vendor id, subvendor id, subdevice id, and device id
471  */
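/*
 * Each entry appears to list: vendor id, device id, subvendor id,
 * subdevice id, hardware interface, flags, bus type, vendor name and
 * product name (see struct aac_card_type in aac.h).
 */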
472 static struct aac_card_type aac_cards[] = {
473 	{0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
474 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
475 	    "Dell", "PERC 3/Di"},
476 	{0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
477 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
478 	    "Dell", "PERC 3/Di"},
479 	{0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
480 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
481 	    "Dell", "PERC 3/Si"},
482 	{0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
483 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
484 	    "Dell", "PERC 3/Di"},
485 	{0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
486 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
487 	    "Dell", "PERC 3/Si"},
488 	{0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
489 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
490 	    "Dell", "PERC 3/Di"},
491 	{0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
492 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
493 	    "Dell", "PERC 3/Di"},
494 	{0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
495 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
496 	    "Dell", "PERC 3/Di"},
497 	{0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
498 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
499 	    "Dell", "PERC 3/Di"},
500 	{0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
501 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
502 	    "Dell", "PERC 3/Di"},
503 	{0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
504 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
505 	    "Dell", "PERC 320/DC"},
506 	{0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
507 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},
508 
509 	{0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
510 	    0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
511 	{0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
512 	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
513 	{0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
514 	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},
515 
516 	{0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
517 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
518 	{0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
519 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
520 
521 	{0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
522 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
523 	    "Adaptec", "2200S"},
524 	{0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
525 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
526 	    "Adaptec", "2120S"},
527 	{0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
528 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
529 	    "Adaptec", "2200S"},
530 	{0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
531 	    0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
532 	{0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
533 	    0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
534 	{0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
535 	    0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
536 	{0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
537 	    0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
538 	{0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
539 	    0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
540 	{0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
541 	    0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
542 	{0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
543 	    0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
544 	{0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
545 	    0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
546 	{0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
547 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
548 	{0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
549 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
550 	{0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
551 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
552 	{0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
553 	    0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
554 	{0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
555 	    0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
556 	{0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
557 	    0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
558 	{0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
559 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
560 	{0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
561 	    0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
562 	{0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
563 	    0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
564 	{0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
565 	    0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
566 	{0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
567 	    0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
568 	{0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
569 	    0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
570 	{0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
571 	    0, AAC_TYPE_SATA, "ICP", "9024RO"},
572 	{0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
573 	    0, AAC_TYPE_SATA, "ICP", "9014RO"},
574 	{0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
575 	    0, AAC_TYPE_SATA, "ICP", "9047MA"},
576 	{0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
577 	    0, AAC_TYPE_SATA, "ICP", "9087MA"},
578 	{0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
579 	    0, AAC_TYPE_SAS, "ICP", "9085LI"},
580 	{0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
581 	    0, AAC_TYPE_SAS, "ICP", "5085BR"},
582 	{0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
583 	    0, AAC_TYPE_SATA, "ICP", "9067MA"},
584 	{0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
585 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
586 	{0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
587 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
588 	{0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
589 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
590 	{0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
591 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
592 	{0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
593 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
594 	{0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
595 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},
596 
597 	{0, 0, 0, 0, AAC_HWIF_UNKNOWN,
598 	    0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
599 };
600 
601 /*
602  * Hardware access functions for i960 based cards
603  */
604 static struct aac_interface aac_rx_interface = {
605 	aac_rx_get_fwstatus,
606 	aac_rx_get_mailbox,
607 	aac_rx_set_mailbox
608 };
609 
610 /*
611  * Hardware access functions for Rocket based cards
612  */
613 static struct aac_interface aac_rkt_interface = {
614 	aac_rkt_get_fwstatus,
615 	aac_rkt_get_mailbox,
616 	aac_rkt_set_mailbox
617 };
618 
619 ddi_device_acc_attr_t aac_acc_attr = {
620 	DDI_DEVICE_ATTR_V1,
621 	DDI_STRUCTURE_LE_ACC,
622 	DDI_STRICTORDER_ACC,
623 	DDI_DEFAULT_ACC
624 };
625 
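/*
 * Per-queue size and the doorbell to ring when an entry is added (0 means
 * no notification is needed), listed in the order host/adapter normal and
 * high command queues, then the corresponding response queues; presumably
 * indexed by the AAC_*_Q queue numbers used elsewhere in this file.
 */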
626 static struct {
627 	int	size;
628 	int	notify;
629 } aac_qinfo[] = {
630 	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
631 	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
632 	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
633 	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
634 	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
635 	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
636 	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
637 	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
638 };
639 
640 /*
641  * Default aac dma attributes
642  */
643 static ddi_dma_attr_t aac_dma_attr = {
644 	DMA_ATTR_V0,
645 	0,		/* lowest usable address */
646 	0xffffffffull,	/* high DMA address range */
647 	0xffffffffull,	/* DMA counter register */
648 	AAC_DMA_ALIGN,	/* DMA address alignment */
649 	1,		/* DMA burstsizes */
650 	1,		/* min effective DMA size */
651 	0xffffffffull,	/* max DMA xfer size */
652 	0xffffffffull,	/* segment boundary */
653 	1,		/* s/g list length */
654 	AAC_BLK_SIZE,	/* granularity of device */
655 	0		/* DMA transfer flags */
656 };
657 
658 struct aac_drinfo {
659 	struct aac_softstate *softs;
660 	int tgt;
661 	int lun;
662 	int event;
663 };
664 
665 static int aac_tick = AAC_DEFAULT_TICK;	/* tick for the internal timer */
666 static uint32_t aac_timebase = 0;	/* internal timer in seconds */
667 static uint32_t aac_sync_time = 0;	/* next time to sync. with firmware */
668 
669 /*
670  * Warlock directives
671  *
672  * Different variables with the same types have to be protected by the
673  * same mutex; otherwise, warlock will complain with "variables don't
674  * seem to be protected consistently". For example,
675  * aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected
676  * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to
677  * declare them as protected explicitly at aac_cmd_dequeue().
678  */
679 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
680     scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
681     mode_format mode_geometry mode_header aac_cmd))
682 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
683     aac_sge))
684 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
685     aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
686     aac_sg_table aac_srb))
687 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
688 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
689 _NOTE(SCHEME_PROTECTS_DATA("unique to dr event", aac_drinfo))
690 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf))
691 
692 int
693 _init(void)
694 {
695 	int rval = 0;
696 
697 #ifdef DEBUG
698 	mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
699 #endif
700 	DBCALLED(NULL, 1);
701 
702 	if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
703 	    sizeof (struct aac_softstate), 0)) != 0)
704 		goto error;
705 
706 	if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
707 		ddi_soft_state_fini((void *)&aac_softstatep);
708 		goto error;
709 	}
710 
711 	if ((rval = mod_install(&aac_modlinkage)) != 0) {
712 		ddi_soft_state_fini((void *)&aac_softstatep);
713 		scsi_hba_fini(&aac_modlinkage);
714 		goto error;
715 	}
716 	return (rval);
717 
718 error:
719 	AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
720 #ifdef DEBUG
721 	mutex_destroy(&aac_prt_mutex);
722 #endif
723 	return (rval);
724 }
725 
726 int
727 _info(struct modinfo *modinfop)
728 {
729 	DBCALLED(NULL, 1);
730 	return (mod_info(&aac_modlinkage, modinfop));
731 }
732 
733 /*
734  * An HBA driver cannot be unloaded unless you reboot,
735  * so this function will be of no use.
736  */
737 int
738 _fini(void)
739 {
740 	int rval;
741 
742 	DBCALLED(NULL, 1);
743 
744 	if ((rval = mod_remove(&aac_modlinkage)) != 0)
745 		goto error;
746 
747 	scsi_hba_fini(&aac_modlinkage);
748 	ddi_soft_state_fini((void *)&aac_softstatep);
749 #ifdef DEBUG
750 	mutex_destroy(&aac_prt_mutex);
751 #endif
752 	return (0);
753 
754 error:
755 	AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
756 	return (rval);
757 }
758 
759 static int
760 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
761 {
762 	int instance, i;
763 	struct aac_softstate *softs = NULL;
764 	int attach_state = 0;
765 	char *data;
766 
767 	DBCALLED(NULL, 1);
768 
769 	switch (cmd) {
770 	case DDI_ATTACH:
771 		break;
772 	case DDI_RESUME:
773 		return (DDI_FAILURE);
774 	default:
775 		return (DDI_FAILURE);
776 	}
777 
778 	instance = ddi_get_instance(dip);
779 
780 	/* Get soft state */
781 	if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
782 		AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
783 		goto error;
784 	}
785 	softs = ddi_get_soft_state(aac_softstatep, instance);
786 	attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;
787 
788 	softs->instance = instance;
789 	softs->devinfo_p = dip;
790 	softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
791 	softs->addr_dma_attr.dma_attr_granular = 1;
792 	softs->acc_attr = aac_acc_attr;
793 	softs->reg_attr = aac_acc_attr;
794 	softs->card = AAC_UNKNOWN_CARD;
795 #ifdef DEBUG
796 	softs->debug_flags = aac_debug_flags;
797 	softs->debug_fib_flags = aac_debug_fib_flags;
798 #endif
799 
800 	/* Initialize FMA */
801 	aac_fm_init(softs);
802 
803 	/* Check the card type */
804 	if (aac_check_card_type(softs) == AACERR) {
805 		AACDB_PRINT(softs, CE_WARN, "Card not supported");
806 		goto error;
807 	}
808 	/* We have found the right card and everything is OK */
809 	attach_state |= AAC_ATTACH_CARD_DETECTED;
810 
811 	/* Map PCI mem space */
812 	if (ddi_regs_map_setup(dip, 1,
813 	    (caddr_t *)&softs->pci_mem_base_vaddr, 0,
814 	    softs->map_size_min, &softs->reg_attr,
815 	    &softs->pci_mem_handle) != DDI_SUCCESS)
816 		goto error;
817 
818 	softs->map_size = softs->map_size_min;
819 	attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
820 
821 	AAC_DISABLE_INTR(softs);
822 
823 	/* Init mutexes and condvars */
824 	mutex_init(&softs->q_comp_mutex, NULL,
825 	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
826 	cv_init(&softs->event, NULL, CV_DRIVER, NULL);
827 	mutex_init(&softs->aifq_mutex, NULL,
828 	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
829 	cv_init(&softs->aifv, NULL, CV_DRIVER, NULL);
830 	cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
831 	mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
832 	    DDI_INTR_PRI(softs->intr_pri));
833 	attach_state |= AAC_ATTACH_KMUTEX_INITED;
834 
835 	/* Init the cmd queues */
836 	for (i = 0; i < AAC_CMDQ_NUM; i++)
837 		aac_cmd_initq(&softs->q_wait[i]);
838 	aac_cmd_initq(&softs->q_busy);
839 	aac_cmd_initq(&softs->q_comp);
840 
841 	/* Check for legacy device naming support */
842 	softs->legacy = 1; /* default to use legacy name */
843 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
844 	    "legacy-name-enable", &data) == DDI_SUCCESS)) {
845 		if (strcmp(data, "no") == 0) {
846 			AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
847 			softs->legacy = 0;
848 		}
849 		ddi_prop_free(data);
850 	}
851 
852 	/*
853 	 * Everything has been set up so far;
854 	 * now do the common attach work.
855 	 */
856 	mutex_enter(&softs->io_lock);
857 	if (aac_common_attach(softs) == AACERR) {
858 		mutex_exit(&softs->io_lock);
859 		goto error;
860 	}
861 	mutex_exit(&softs->io_lock);
862 	attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;
863 
864 	/* Check for buf breakup support */
865 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
866 	    "breakup-enable", &data) == DDI_SUCCESS)) {
867 		if (strcmp(data, "yes") == 0) {
868 			AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled");
869 			softs->flags |= AAC_FLAGS_BRKUP;
870 		}
871 		ddi_prop_free(data);
872 	}
873 	softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer;
874 	if (softs->flags & AAC_FLAGS_BRKUP) {
875 		softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
876 		    DDI_PROP_DONTPASS, "dma-max", softs->dma_max);
877 	}
878 
879 	if (aac_hba_setup(softs) != AACOK)
880 		goto error;
881 	attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;
882 
883 	/* Create devctl/scsi nodes for cfgadm */
884 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
885 	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
886 		AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
887 		goto error;
888 	}
889 	attach_state |= AAC_ATTACH_CREATE_DEVCTL;
890 
891 	if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
892 	    DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
893 		AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
894 		goto error;
895 	}
896 	attach_state |= AAC_ATTACH_CREATE_SCSI;
897 
898 	/* Create aac node for app. to issue ioctls */
899 	if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
900 	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
901 		AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
902 		goto error;
903 	}
904 
905 	/* Create a taskq for dealing with dr events */
906 	if ((softs->taskq = ddi_taskq_create(dip, "aac_dr_taskq", 1,
907 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
908 		AACDB_PRINT(softs, CE_WARN, "ddi_taskq_create failed");
909 		goto error;
910 	}
911 
912 	aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
913 	softs->state = AAC_STATE_RUN;
914 
915 	/* Arm the timeout handler used for command timeout checking */
916 	softs->timeout_id = timeout(aac_daemon, (void *)softs,
917 	    (60 * drv_usectohz(1000000)));
918 
919 	/* Common attach is OK, so we are attached! */
920 	ddi_report_dev(dip);
921 	AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
922 	return (DDI_SUCCESS);
923 
924 error:
925 	if (softs && softs->taskq)
926 		ddi_taskq_destroy(softs->taskq);
927 	if (attach_state & AAC_ATTACH_CREATE_SCSI)
928 		ddi_remove_minor_node(dip, "scsi");
929 	if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
930 		ddi_remove_minor_node(dip, "devctl");
931 	if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
932 		aac_common_detach(softs);
933 	if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
934 		(void) scsi_hba_detach(dip);
935 		scsi_hba_tran_free(AAC_DIP2TRAN(dip));
936 	}
937 	if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
938 		mutex_destroy(&softs->q_comp_mutex);
939 		cv_destroy(&softs->event);
940 		mutex_destroy(&softs->aifq_mutex);
941 		cv_destroy(&softs->aifv);
942 		cv_destroy(&softs->drain_cv);
943 		mutex_destroy(&softs->io_lock);
944 	}
945 	if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
946 		ddi_regs_map_free(&softs->pci_mem_handle);
947 	aac_fm_fini(softs);
948 	if (attach_state & AAC_ATTACH_CARD_DETECTED)
949 		softs->card = AACERR;
950 	if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
951 		ddi_soft_state_free(aac_softstatep, instance);
952 	return (DDI_FAILURE);
953 }
954 
955 static int
956 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
957 {
958 	scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
959 	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
960 
961 	DBCALLED(softs, 1);
962 
963 	switch (cmd) {
964 	case DDI_DETACH:
965 		break;
966 	case DDI_SUSPEND:
967 		return (DDI_FAILURE);
968 	default:
969 		return (DDI_FAILURE);
970 	}
971 
972 	mutex_enter(&softs->io_lock);
973 	AAC_DISABLE_INTR(softs);
974 	softs->state = AAC_STATE_STOPPED;
975 
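	/*
	 * Drop io_lock around untimeout(), since a running aac_daemon may
	 * need the lock to finish before untimeout() can return.
	 */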
976 	mutex_exit(&softs->io_lock);
977 	(void) untimeout(softs->timeout_id);
978 	mutex_enter(&softs->io_lock);
979 	softs->timeout_id = 0;
980 
981 	ddi_taskq_destroy(softs->taskq);
982 
983 	ddi_remove_minor_node(dip, "aac");
984 	ddi_remove_minor_node(dip, "scsi");
985 	ddi_remove_minor_node(dip, "devctl");
986 
987 	mutex_exit(&softs->io_lock);
988 
989 	aac_common_detach(softs);
990 
991 	mutex_enter(&softs->io_lock);
992 	(void) scsi_hba_detach(dip);
993 	scsi_hba_tran_free(tran);
994 	mutex_exit(&softs->io_lock);
995 
996 	mutex_destroy(&softs->q_comp_mutex);
997 	cv_destroy(&softs->event);
998 	mutex_destroy(&softs->aifq_mutex);
999 	cv_destroy(&softs->aifv);
1000 	cv_destroy(&softs->drain_cv);
1001 	mutex_destroy(&softs->io_lock);
1002 
1003 	ddi_regs_map_free(&softs->pci_mem_handle);
1004 	aac_fm_fini(softs);
1005 	softs->hwif = AAC_HWIF_UNKNOWN;
1006 	softs->card = AAC_UNKNOWN_CARD;
1007 	ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));
1008 
1009 	return (DDI_SUCCESS);
1010 }
1011 
1012 /*ARGSUSED*/
1013 static int
1014 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1015 {
1016 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1017 
1018 	DBCALLED(softs, 1);
1019 
1020 	mutex_enter(&softs->io_lock);
1021 	(void) aac_shutdown(softs);
1022 	mutex_exit(&softs->io_lock);
1023 
1024 	return (DDI_SUCCESS);
1025 }
1026 
1027 /*
1028  * quiesce(9E) entry point.
1029  *
1030  * This function is called when the system is single-threaded at high
1031  * PIL with preemption disabled. Therefore, this function must not
1032  * block.
1033  *
1034  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1035  * DDI_FAILURE indicates an error condition and should almost never happen.
1036  */
1037 static int
1038 aac_quiesce(dev_info_t *dip)
1039 {
1040 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1041 
1042 	if (softs == NULL)
1043 		return (DDI_FAILURE);
1044 
1045 	AAC_DISABLE_INTR(softs);
1046 
1047 	return (DDI_SUCCESS);
1048 }
1049 
1050 /* ARGSUSED */
1051 static int
1052 aac_getinfo(dev_info_t *self, ddi_info_cmd_t infocmd, void *arg,
1053     void **result)
1054 {
1055 	int error = DDI_SUCCESS;
1056 
1057 	switch (infocmd) {
1058 	case DDI_INFO_DEVT2INSTANCE:
1059 		*result = (void *)(intptr_t)(MINOR2INST(getminor((dev_t)arg)));
1060 		break;
1061 	default:
1062 		error = DDI_FAILURE;
1063 	}
1064 	return (error);
1065 }
1066 
1067 /*
1068  * Bring the controller down to a dormant state and detach all child devices.
1069  * This function is called before detach or system shutdown.
1070  * Note: we can assume that the q_wait on the controller is empty, as we
1071  * won't allow shutdown if any device is open.
1072  */
1073 static int
1074 aac_shutdown(struct aac_softstate *softs)
1075 {
1076 	ddi_acc_handle_t acc;
1077 	struct aac_close_command *cc;
1078 	int rval;
1079 
1080 	(void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
1081 	acc = softs->sync_ac.slotp->fib_acc_handle;
1082 
1083 	cc = (struct aac_close_command *)&softs->sync_ac.slotp->fibp->data[0];
1084 
1085 	ddi_put32(acc, &cc->Command, VM_CloseAll);
1086 	ddi_put32(acc, &cc->ContainerId, 0xfffffffful);
1087 
1088 	/* Flush all caches, set FW to write through mode */
1089 	rval = aac_sync_fib(softs, ContainerCommand,
1090 	    AAC_FIB_SIZEOF(struct aac_close_command));
1091 	aac_sync_fib_slot_release(softs, &softs->sync_ac);
1092 
1093 	AACDB_PRINT(softs, CE_NOTE,
1094 	    "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
1095 	return (rval);
1096 }
1097 
1098 static uint_t
1099 aac_softintr(caddr_t arg)
1100 {
1101 	struct aac_softstate *softs = (void *)arg;
1102 
1103 	if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
1104 		aac_drain_comp_q(softs);
1105 	}
1106 	return (DDI_INTR_CLAIMED);
1107 }
1108 
1109 /*
1110  * Setup auto sense data for pkt
1111  */
1112 static void
1113 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
1114     uchar_t add_code, uchar_t qual_code, uint64_t info)
1115 {
1116 	struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp);
1117 
1118 	*pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
1119 	pkt->pkt_state |= STATE_ARQ_DONE;
1120 
1121 	*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1122 	arqstat->sts_rqpkt_reason = CMD_CMPLT;
1123 	arqstat->sts_rqpkt_resid = 0;
1124 	arqstat->sts_rqpkt_state =
1125 	    STATE_GOT_BUS |
1126 	    STATE_GOT_TARGET |
1127 	    STATE_SENT_CMD |
1128 	    STATE_XFERRED_DATA;
1129 	arqstat->sts_rqpkt_statistics = 0;
1130 
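	/*
	 * Use fixed-format sense data when the information field (e.g. an
	 * LBA) fits in 32 bits; otherwise build descriptor-format sense with
	 * an information descriptor to carry the 64-bit value.
	 */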
1131 	if (info <= 0xfffffffful) {
1132 		arqstat->sts_sensedata.es_valid = 1;
1133 		arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
1134 		arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
1135 		arqstat->sts_sensedata.es_key = key;
1136 		arqstat->sts_sensedata.es_add_code = add_code;
1137 		arqstat->sts_sensedata.es_qual_code = qual_code;
1138 
1139 		arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
1140 		arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
1141 		arqstat->sts_sensedata.es_info_3 = (info >>  8) & 0xFF;
1142 		arqstat->sts_sensedata.es_info_4 = info & 0xFF;
1143 	} else { /* 64-bit LBA */
1144 		struct scsi_descr_sense_hdr *dsp;
1145 		struct scsi_information_sense_descr *isd;
1146 
1147 		dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
1148 		dsp->ds_class = CLASS_EXTENDED_SENSE;
1149 		dsp->ds_code = CODE_FMT_DESCR_CURRENT;
1150 		dsp->ds_key = key;
1151 		dsp->ds_add_code = add_code;
1152 		dsp->ds_qual_code = qual_code;
1153 		dsp->ds_addl_sense_length =
1154 		    sizeof (struct scsi_information_sense_descr);
1155 
1156 		isd = (struct scsi_information_sense_descr *)(dsp+1);
1157 		isd->isd_descr_type = DESCR_INFORMATION;
1158 		isd->isd_valid = 1;
1159 		isd->isd_information[0] = (info >> 56) & 0xFF;
1160 		isd->isd_information[1] = (info >> 48) & 0xFF;
1161 		isd->isd_information[2] = (info >> 40) & 0xFF;
1162 		isd->isd_information[3] = (info >> 32) & 0xFF;
1163 		isd->isd_information[4] = (info >> 24) & 0xFF;
1164 		isd->isd_information[5] = (info >> 16) & 0xFF;
1165 		isd->isd_information[6] = (info >>  8) & 0xFF;
1166 		isd->isd_information[7] = (info) & 0xFF;
1167 	}
1168 }
1169 
1170 /*
1171  * Setup auto sense data for HARDWARE ERROR
1172  */
1173 static void
1174 aac_set_arq_data_hwerr(struct aac_cmd *acp)
1175 {
1176 	union scsi_cdb *cdbp;
1177 	uint64_t err_blkno;
1178 
1179 	cdbp = (void *)acp->pkt->pkt_cdbp;
1180 	err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
1181 	aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
1182 }
1183 
1184 /*
1185  * Send a command to the adapter using the New Comm. interface
1186  */
1187 static int
1188 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
1189 {
1190 	uint32_t index, device;
1191 
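	/*
	 * Get a free entry index from the inbound queue; 0xffffffff means
	 * none is available, so retry once before giving up.
	 */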
1192 	index = PCI_MEM_GET32(softs, AAC_IQUE);
1193 	if (index == 0xffffffffUL) {
1194 		index = PCI_MEM_GET32(softs, AAC_IQUE);
1195 		if (index == 0xffffffffUL)
1196 			return (AACERR);
1197 	}
1198 
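	/*
	 * Fill the entry with the 64-bit FIB physical address and the FIB
	 * size, then post the index back to hand the FIB to the adapter.
	 */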
1199 	device = index;
1200 	PCI_MEM_PUT32(softs, device,
1201 	    (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
1202 	device += 4;
1203 	PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
1204 	device += 4;
1205 	PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
1206 	PCI_MEM_PUT32(softs, AAC_IQUE, index);
1207 	return (AACOK);
1208 }
1209 
1210 static void
1211 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
1212 {
1213 	struct aac_device *dvp = acp->dvp;
1214 	int q = AAC_CMDQ(acp);
1215 
1216 	if (acp->slotp) { /* outstanding cmd */
1217 		if (!(acp->flags & AAC_CMD_IN_SYNC_SLOT)) {
1218 			aac_release_slot(softs, acp->slotp);
1219 			acp->slotp = NULL;
1220 		}
1221 		if (dvp) {
1222 			dvp->ncmds[q]--;
1223 			if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
1224 			    dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
1225 				aac_set_throttle(softs, dvp, q,
1226 				    softs->total_slots);
1227 			/*
1228 			 * Setup auto sense data for UNIT ATTENTION
1229 			 * Each LUN should generate a unit attention
1230 			 * condition when reset.
1231 			 * Phys. drives are treated as logical ones
1232 			 * during error recovery.
1233 			 */
1234 			if (dvp->type == AAC_DEV_LD) {
1235 				struct aac_container *ctp =
1236 				    (struct aac_container *)dvp;
1237 				if (ctp->reset == 0)
1238 					goto noreset;
1239 
1240 				AACDB_PRINT(softs, CE_NOTE,
1241 				    "Unit attention: reset");
1242 				ctp->reset = 0;
1243 				aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION,
1244 				    0x29, 0x02, 0);
1245 			}
1246 		}
1247 noreset:
1248 		softs->bus_ncmds[q]--;
1249 		aac_cmd_delete(&softs->q_busy, acp);
1250 	} else { /* cmd in waiting queue */
1251 		aac_cmd_delete(&softs->q_wait[q], acp);
1252 	}
1253 
1254 	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
1255 		mutex_enter(&softs->q_comp_mutex);
1256 		aac_cmd_enqueue(&softs->q_comp, acp);
1257 		mutex_exit(&softs->q_comp_mutex);
1258 	} else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
1259 		cv_broadcast(&softs->event);
1260 	}
1261 }
1262 
1263 static void
1264 aac_handle_io(struct aac_softstate *softs, int index)
1265 {
1266 	struct aac_slot *slotp;
1267 	struct aac_cmd *acp;
1268 	uint32_t fast;
1269 
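	/*
	 * The value returned by the adapter carries flag bits in its low
	 * bits; shift them off to recover the slot index.
	 */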
1270 	fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
1271 	index >>= 2;
1272 
1273 	/* Make sure firmware reported index is valid */
1274 	ASSERT(index >= 0 && index < softs->total_slots);
1275 	slotp = &softs->io_slot[index];
1276 	ASSERT(slotp->index == index);
1277 	acp = slotp->acp;
1278 
1279 	if (acp == NULL || acp->slotp != slotp) {
1280 		cmn_err(CE_WARN,
1281 		    "Firmware error: invalid slot index received from FW");
1282 		return;
1283 	}
1284 
1285 	acp->flags |= AAC_CMD_CMPLT;
1286 	(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1287 
1288 	if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1289 		/*
1290 		 * For fast response IO, the firmware does not return any FIB
1291 		 * data, so we need to fill in the FIB status and state so that
1292 		 * FIB users can handle it correctly.
1293 		 */
1294 		if (fast) {
1295 			uint32_t state;
1296 
1297 			state = ddi_get32(slotp->fib_acc_handle,
1298 			    &slotp->fibp->Header.XferState);
1299 			/*
1300 			 * Update state for CPU not for device, no DMA sync
1301 			 * needed
1302 			 */
1303 			ddi_put32(slotp->fib_acc_handle,
1304 			    &slotp->fibp->Header.XferState,
1305 			    state | AAC_FIBSTATE_DONEADAP);
1306 			ddi_put32(slotp->fib_acc_handle,
1307 			    (void *)&slotp->fibp->data[0], ST_OK);
1308 		}
1309 
1310 		/* Handle completed ac */
1311 		acp->ac_comp(softs, acp);
1312 	} else {
1313 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1314 		acp->flags |= AAC_CMD_ERR;
1315 		if (acp->pkt) {
1316 			acp->pkt->pkt_reason = CMD_TRAN_ERR;
1317 			acp->pkt->pkt_statistics = 0;
1318 		}
1319 	}
1320 	aac_end_io(softs, acp);
1321 }
1322 
1323 /*
1324  * Interrupt handler for New Comm. interface
1325  * The New Comm. interface uses a different mechanism for interrupts: there
1326  * are no explicit message queues, and the driver only needs to access the
1327  * mapped PCI mem space to find the completed FIB or AIF.
1328  */
1329 static int
1330 aac_process_intr_new(struct aac_softstate *softs)
1331 {
1332 	uint32_t index;
1333 
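	/* Read the outbound queue; 0xffffffff means empty, so re-read once */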
1334 	index = AAC_OUTB_GET(softs);
1335 	if (index == 0xfffffffful)
1336 		index = AAC_OUTB_GET(softs);
1337 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1338 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1339 		return (0);
1340 	}
1341 	if (index != 0xfffffffful) {
1342 		do {
1343 			if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1344 				aac_handle_io(softs, index);
1345 			} else if (index != 0xfffffffeul) {
1346 				struct aac_fib *fibp;	/* FIB in AIF queue */
1347 				uint16_t fib_size, fib_size0;
1348 
1349 				/*
1350 				 * 0xfffffffe means that the controller wants
1351 				 * more work; ignore it for now. Otherwise,
1352 				 * an AIF has been received.
1353 				 */
1354 				index &= ~2;
1355 
1356 				mutex_enter(&softs->aifq_mutex);
1357 				/*
1358 				 * Copy AIF from adapter to the empty AIF slot
1359 				 */
1360 				fibp = &softs->aifq[softs->aifq_idx].d;
1361 				fib_size0 = PCI_MEM_GET16(softs, index + \
1362 				    offsetof(struct aac_fib, Header.Size));
1363 				fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1364 				    AAC_FIB_SIZE : fib_size0;
1365 				PCI_MEM_REP_GET8(softs, index, fibp,
1366 				    fib_size);
1367 
1368 				if (aac_check_acc_handle(softs-> \
1369 				    pci_mem_handle) == DDI_SUCCESS)
1370 					(void) aac_handle_aif(softs, fibp);
1371 				else
1372 					ddi_fm_service_impact(softs->devinfo_p,
1373 					    DDI_SERVICE_UNAFFECTED);
1374 				mutex_exit(&softs->aifq_mutex);
1375 
1376 				/*
1377 				 * AIF memory is owned by the adapter, so let it
1378 				 * know that we are done with it.
1379 				 */
1380 				AAC_OUTB_SET(softs, index);
1381 				AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1382 			}
1383 
1384 			index = AAC_OUTB_GET(softs);
1385 		} while (index != 0xfffffffful);
1386 
1387 		/*
1388 		 * Process waiting cmds before starting new ones to
1389 		 * ensure that earlier IOs are serviced first.
1390 		 */
1391 		aac_start_waiting_io(softs);
1392 		return (AAC_DB_COMMAND_READY);
1393 	} else {
1394 		return (0);
1395 	}
1396 }
1397 
1398 static uint_t
1399 aac_intr_new(caddr_t arg)
1400 {
1401 	struct aac_softstate *softs = (void *)arg;
1402 	uint_t rval;
1403 
1404 	mutex_enter(&softs->io_lock);
1405 	if (aac_process_intr_new(softs))
1406 		rval = DDI_INTR_CLAIMED;
1407 	else
1408 		rval = DDI_INTR_UNCLAIMED;
1409 	mutex_exit(&softs->io_lock);
1410 
1411 	aac_drain_comp_q(softs);
1412 	return (rval);
1413 }
1414 
1415 /*
1416  * Interrupt handler for old interface
1417  * Explicit message queues are used to send FIBs to and get completed FIBs
1418  * from the adapter; driver and adapter maintain the queues in a producer/
1419  * consumer manner. The driver has to query the queues to find completed FIBs.
1420  */
1421 static int
1422 aac_process_intr_old(struct aac_softstate *softs)
1423 {
1424 	uint16_t status;
1425 
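	/* Read the outbound doorbell to see which events the adapter signalled */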
1426 	status = AAC_STATUS_GET(softs);
1427 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1428 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1429 		return (DDI_INTR_UNCLAIMED);
1430 	}
1431 	if (status & AAC_DB_RESPONSE_READY) {
1432 		int slot_idx;
1433 
1434 		/* ACK the intr */
1435 		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1436 		(void) AAC_STATUS_GET(softs);
1437 		while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1438 		    &slot_idx) == AACOK)
1439 			aac_handle_io(softs, slot_idx);
1440 
1441 		/*
1442 		 * Process waiting cmds before starting new ones to
1443 		 * ensure that earlier IOs are serviced first.
1444 		 */
1445 		aac_start_waiting_io(softs);
1446 		return (AAC_DB_RESPONSE_READY);
1447 	} else if (status & AAC_DB_COMMAND_READY) {
1448 		int aif_idx;
1449 
1450 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
1451 		(void) AAC_STATUS_GET(softs);
1452 		if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
1453 		    AACOK) {
1454 			ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1455 			struct aac_fib *fibp;	/* FIB in AIF queue */
1456 			struct aac_fib *fibp0;	/* FIB in communication space */
1457 			uint16_t fib_size, fib_size0;
1458 			uint32_t fib_xfer_state;
1459 			uint32_t addr, size;
1460 
1461 			ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
1462 
1463 #define	AAC_SYNC_AIF(softs, aif_idx, type) \
1464 	{ (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
1465 	    offsetof(struct aac_comm_space, \
1466 	    adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
1467 	    (type)); }
1468 
1469 			mutex_enter(&softs->aifq_mutex);
1470 			/* Copy AIF from adapter to the empty AIF slot */
1471 			fibp = &softs->aifq[softs->aifq_idx].d;
1472 			AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
1473 			fibp0 = &softs->comm_space->adapter_fibs[aif_idx];
1474 			fib_size0 = ddi_get16(acc, &fibp0->Header.Size);
1475 			fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1476 			    AAC_FIB_SIZE : fib_size0;
1477 			ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0,
1478 			    fib_size, DDI_DEV_AUTOINCR);
1479 
1480 			(void) aac_handle_aif(softs, fibp);
1481 			mutex_exit(&softs->aifq_mutex);
1482 
1483 			/* Complete AIF back to adapter with good status */
1484 			fib_xfer_state = LE_32(fibp->Header.XferState);
1485 			if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
1486 				ddi_put32(acc, &fibp0->Header.XferState,
1487 				    fib_xfer_state | AAC_FIBSTATE_DONEHOST);
1488 				ddi_put32(acc, (void *)&fibp0->data[0], ST_OK);
1489 				if (fib_size0 > AAC_FIB_SIZE)
1490 					ddi_put16(acc, &fibp0->Header.Size,
1491 					    AAC_FIB_SIZE);
1492 				AAC_SYNC_AIF(softs, aif_idx,
1493 				    DDI_DMA_SYNC_FORDEV);
1494 			}
1495 
1496 			/* Put the AIF response on the response queue */
1497 			addr = ddi_get32(acc,
1498 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1499 			    Header.SenderFibAddress);
1500 			size = (uint32_t)ddi_get16(acc,
1501 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1502 			    Header.Size);
1503 			ddi_put32(acc,
1504 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1505 			    Header.ReceiverFibAddress, addr);
1506 			if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
1507 			    addr, size) == AACERR)
1508 				cmn_err(CE_NOTE, "!AIF ack failed");
1509 		}
1510 		return (AAC_DB_COMMAND_READY);
1511 	} else if (status & AAC_DB_PRINTF_READY) {
1512 		/* ACK the intr */
1513 		AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
1514 		(void) AAC_STATUS_GET(softs);
1515 		(void) ddi_dma_sync(softs->comm_space_dma_handle,
1516 		    offsetof(struct aac_comm_space, adapter_print_buf),
1517 		    AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
1518 		if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
1519 		    DDI_SUCCESS)
1520 			cmn_err(CE_NOTE, "MSG From Adapter: %s",
1521 			    softs->comm_space->adapter_print_buf);
1522 		else
1523 			ddi_fm_service_impact(softs->devinfo_p,
1524 			    DDI_SERVICE_UNAFFECTED);
1525 		AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
1526 		return (AAC_DB_PRINTF_READY);
1527 	} else if (status & AAC_DB_COMMAND_NOT_FULL) {
1528 		/*
1529 		 * Without these two condition statements, the OS could hang
1530 		 * after a while, especially if there are a lot of AIFs to
1531 		 * handle, for instance if a drive is pulled from an array
1532 		 * under heavy load.
1533 		 */
1534 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1535 		return (AAC_DB_COMMAND_NOT_FULL);
1536 	} else if (status & AAC_DB_RESPONSE_NOT_FULL) {
1537 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1538 		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
1539 		return (AAC_DB_RESPONSE_NOT_FULL);
1540 	} else {
1541 		return (0);
1542 	}
1543 }
1544 
1545 static uint_t
1546 aac_intr_old(caddr_t arg)
1547 {
1548 	struct aac_softstate *softs = (void *)arg;
1549 	int rval;
1550 
1551 	mutex_enter(&softs->io_lock);
1552 	if (aac_process_intr_old(softs))
1553 		rval = DDI_INTR_CLAIMED;
1554 	else
1555 		rval = DDI_INTR_UNCLAIMED;
1556 	mutex_exit(&softs->io_lock);
1557 
1558 	aac_drain_comp_q(softs);
1559 	return (rval);
1560 }
1561 
1562 /*
1563  * Query FIXED or MSI interrupts
1564  */
1565 static int
1566 aac_query_intrs(struct aac_softstate *softs, int intr_type)
1567 {
1568 	dev_info_t *dip = softs->devinfo_p;
1569 	int avail, actual, count;
1570 	int i, flag, ret;
1571 
1572 	AACDB_PRINT(softs, CE_NOTE,
1573 	    "aac_query_intrs:interrupt type 0x%x", intr_type);
1574 
1575 	/* Get number of interrupts */
1576 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
1577 	if ((ret != DDI_SUCCESS) || (count == 0)) {
1578 		AACDB_PRINT(softs, CE_WARN,
1579 		    "ddi_intr_get_nintrs() failed, ret %d count %d",
1580 		    ret, count);
1581 		return (DDI_FAILURE);
1582 	}
1583 
1584 	/* Get number of available interrupts */
1585 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
1586 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
1587 		AACDB_PRINT(softs, CE_WARN,
1588 		    "ddi_intr_get_navail() failed, ret %d avail %d",
1589 		    ret, avail);
1590 		return (DDI_FAILURE);
1591 	}
1592 
1593 	AACDB_PRINT(softs, CE_NOTE,
1594 	    "ddi_intr_get_nvail returned %d, navail() returned %d",
1595 	    count, avail);
1596 
1597 	/* Allocate an array of interrupt handles */
1598 	softs->intr_size = count * sizeof (ddi_intr_handle_t);
1599 	softs->htable = kmem_alloc(softs->intr_size, KM_SLEEP);
1600 
1601 	if (intr_type == DDI_INTR_TYPE_MSI) {
1602 		count = 1; /* only one vector needed for now */
1603 		flag = DDI_INTR_ALLOC_STRICT;
1604 	} else { /* must be DDI_INTR_TYPE_FIXED */
1605 		flag = DDI_INTR_ALLOC_NORMAL;
1606 	}
1607 
1608 	/* Call ddi_intr_alloc() */
1609 	ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0,
1610 	    count, &actual, flag);
1611 
1612 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
1613 		AACDB_PRINT(softs, CE_WARN,
1614 		    "ddi_intr_alloc() failed, ret = %d", ret);
1615 		actual = 0;
1616 		goto error;
1617 	}
1618 
1619 	if (actual < count) {
1620 		AACDB_PRINT(softs, CE_NOTE,
1621 		    "Requested: %d, Received: %d", count, actual);
1622 		goto error;
1623 	}
1624 
1625 	softs->intr_cnt = actual;
1626 
1627 	/* Get priority for first msi, assume remaining are all the same */
1628 	if ((ret = ddi_intr_get_pri(softs->htable[0],
1629 	    &softs->intr_pri)) != DDI_SUCCESS) {
1630 		AACDB_PRINT(softs, CE_WARN,
1631 		    "ddi_intr_get_pri() failed, ret = %d", ret);
1632 		goto error;
1633 	}
1634 
1635 	/* Test for high level mutex */
1636 	if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) {
1637 		AACDB_PRINT(softs, CE_WARN,
1638 		    "aac_query_intrs: Hi level interrupt not supported");
1639 		goto error;
1640 	}
1641 
1642 	return (DDI_SUCCESS);
1643 
1644 error:
1645 	/* Free already allocated intr */
1646 	for (i = 0; i < actual; i++)
1647 		(void) ddi_intr_free(softs->htable[i]);
1648 
1649 	kmem_free(softs->htable, softs->intr_size);
1650 	return (DDI_FAILURE);
1651 }
1652 
1653 
1654 /*
1655  * Register FIXED or MSI interrupts, and enable them
1656  */
1657 static int
1658 aac_add_intrs(struct aac_softstate *softs)
1659 {
1660 	int i, ret;
1661 	int actual;
1662 	ddi_intr_handler_t *aac_intr;
1663 
1664 	actual = softs->intr_cnt;
1665 	aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
1666 	    aac_intr_new : aac_intr_old);
1667 
1668 	/* Call ddi_intr_add_handler() */
1669 	for (i = 0; i < actual; i++) {
1670 		if ((ret = ddi_intr_add_handler(softs->htable[i],
1671 		    aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
1672 			cmn_err(CE_WARN,
1673 			    "ddi_intr_add_handler() failed ret = %d", ret);
1674 
1675 			/* Free already allocated intr */
1676 			for (i = 0; i < actual; i++)
1677 				(void) ddi_intr_free(softs->htable[i]);
1678 
1679 			kmem_free(softs->htable, softs->intr_size);
1680 			return (DDI_FAILURE);
1681 		}
1682 	}
1683 
1684 	if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
1685 	    != DDI_SUCCESS) {
1686 		cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);
1687 
1688 		/* Free already allocated intr */
1689 		for (i = 0; i < actual; i++)
1690 			(void) ddi_intr_free(softs->htable[i]);
1691 
1692 		kmem_free(softs->htable, softs->intr_size);
1693 		return (DDI_FAILURE);
1694 	}
1695 
1696 	return (DDI_SUCCESS);
1697 }
1698 
1699 /*
1700  * Unregister FIXED or MSI interrupts
1701  */
1702 static void
1703 aac_remove_intrs(struct aac_softstate *softs)
1704 {
1705 	int i;
1706 
1707 	/* Disable all interrupts */
1708 	(void) aac_disable_intrs(softs);
1709 	/* Call ddi_intr_remove_handler() */
1710 	for (i = 0; i < softs->intr_cnt; i++) {
1711 		(void) ddi_intr_remove_handler(softs->htable[i]);
1712 		(void) ddi_intr_free(softs->htable[i]);
1713 	}
1714 
1715 	kmem_free(softs->htable, softs->intr_size);
1716 }
1717 
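/*
 * Enable FIXED or MSI interrupts, using block enable when the capability
 * supports it
 */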
1718 static int
1719 aac_enable_intrs(struct aac_softstate *softs)
1720 {
1721 	int rval = AACOK;
1722 
1723 	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1724 		/* for MSI block enable */
1725 		if (ddi_intr_block_enable(softs->htable, softs->intr_cnt) !=
1726 		    DDI_SUCCESS)
1727 			rval = AACERR;
1728 	} else {
1729 		int i;
1730 
1731 		/* Call ddi_intr_enable() for legacy/MSI non block enable */
1732 		for (i = 0; i < softs->intr_cnt; i++) {
1733 			if (ddi_intr_enable(softs->htable[i]) != DDI_SUCCESS)
1734 				rval = AACERR;
1735 		}
1736 	}
1737 	return (rval);
1738 }
1739 
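/*
 * Disable FIXED or MSI interrupts
 */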
1740 static int
1741 aac_disable_intrs(struct aac_softstate *softs)
1742 {
1743 	int rval = AACOK;
1744 
1745 	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1746 		/* Call ddi_intr_block_disable() */
1747 		if (ddi_intr_block_disable(softs->htable, softs->intr_cnt) !=
1748 		    DDI_SUCCESS)
1749 			rval = AACERR;
1750 	} else {
1751 		int i;
1752 
1753 		for (i = 0; i < softs->intr_cnt; i++) {
1754 			if (ddi_intr_disable(softs->htable[i]) != DDI_SUCCESS)
1755 				rval = AACERR;
1756 		}
1757 	}
1758 	return (rval);
1759 }
1760 
1761 /*
1762  * Set pkt_reason and OR in pkt_statistics flag
1763  */
1764 static void
1765 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
1766     uchar_t reason, uint_t stat)
1767 {
1768 #ifndef __lock_lint
1769 	_NOTE(ARGUNUSED(softs))
1770 #endif
1771 	if (acp->pkt->pkt_reason == CMD_CMPLT)
1772 		acp->pkt->pkt_reason = reason;
1773 	acp->pkt->pkt_statistics |= stat;
1774 }
1775 
1776 /*
1777  * Handle a finished pkt of soft SCMD
1778  */
1779 static void
1780 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
1781 {
1782 	ASSERT(acp->pkt);
1783 
1784 	acp->flags |= AAC_CMD_CMPLT;
1785 
1786 	acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
1787 	    STATE_SENT_CMD | STATE_GOT_STATUS;
1788 	if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
1789 		acp->pkt->pkt_resid = 0;
1790 
1791 	/* AAC_CMD_NO_INTR means no complete callback */
1792 	if (!(acp->flags & AAC_CMD_NO_INTR)) {
1793 		mutex_enter(&softs->q_comp_mutex);
1794 		aac_cmd_enqueue(&softs->q_comp, acp);
1795 		mutex_exit(&softs->q_comp_mutex);
1796 		ddi_trigger_softintr(softs->softint_id);
1797 	}
1798 }
1799 
1800 /*
1801  * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
1802  */
1803 
1804 /*
1805  * Handle completed logical device IO command
1806  */
1807 /*ARGSUSED*/
1808 static void
1809 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1810 {
1811 	struct aac_slot *slotp = acp->slotp;
1812 	struct aac_blockread_response *resp;
1813 	uint32_t status;
1814 
1815 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1816 	ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1817 
1818 	acp->pkt->pkt_state |= STATE_GOT_STATUS;
1819 
1820 	/*
1821 	 * block_read/write have similar response headers; use the blockread
1822 	 * response for both.
1823 	 */
1824 	resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
1825 	status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
1826 	if (status == ST_OK) {
1827 		acp->pkt->pkt_resid = 0;
1828 		acp->pkt->pkt_state |= STATE_XFERRED_DATA;
1829 	} else {
1830 		aac_set_arq_data_hwerr(acp);
1831 	}
1832 }
1833 
1834 /*
1835  * Handle completed phys. device IO command
1836  */
1837 static void
1838 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1839 {
1840 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
1841 	struct aac_fib *fibp = acp->slotp->fibp;
1842 	struct scsi_pkt *pkt = acp->pkt;
1843 	struct aac_srb_reply *resp;
1844 	uint32_t resp_status;
1845 
1846 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1847 	ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1848 
1849 	resp = (struct aac_srb_reply *)&fibp->data[0];
1850 	resp_status = ddi_get32(acc, &resp->status);
1851 
1852 	/* First check FIB status */
1853 	if (resp_status == ST_OK) {
1854 		uint32_t scsi_status;
1855 		uint32_t srb_status;
1856 		uint32_t data_xfer_length;
1857 
1858 		scsi_status = ddi_get32(acc, &resp->scsi_status);
1859 		srb_status = ddi_get32(acc, &resp->srb_status);
1860 		data_xfer_length = ddi_get32(acc, &resp->data_xfer_length);
1861 
1862 		*pkt->pkt_scbp = (uint8_t)scsi_status;
1863 		pkt->pkt_state |= STATE_GOT_STATUS;
1864 		if (scsi_status == STATUS_GOOD) {
1865 			uchar_t cmd = ((union scsi_cdb *)(void *)
1866 			    (pkt->pkt_cdbp))->scc_cmd;
1867 
1868 			/* Next check SRB status */
1869 			switch (srb_status & 0x3f) {
1870 			case SRB_STATUS_DATA_OVERRUN:
1871 				AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \
1872 				    "scmd=%d, xfer=%d, buflen=%d",
1873 				    (uint32_t)cmd, data_xfer_length,
1874 				    acp->bcount);
1875 
1876 				switch (cmd) {
1877 				case SCMD_READ:
1878 				case SCMD_WRITE:
1879 				case SCMD_READ_G1:
1880 				case SCMD_WRITE_G1:
1881 				case SCMD_READ_G4:
1882 				case SCMD_WRITE_G4:
1883 				case SCMD_READ_G5:
1884 				case SCMD_WRITE_G5:
1885 					aac_set_pkt_reason(softs, acp,
1886 					    CMD_DATA_OVR, 0);
1887 					break;
1888 				}
1889 				/*FALLTHRU*/
1890 			case SRB_STATUS_ERROR_RECOVERY:
1891 			case SRB_STATUS_PENDING:
1892 			case SRB_STATUS_SUCCESS:
1893 				/*
1894 				 * pkt_resid should only be calculated if the
1895 				 * status is ERROR_RECOVERY/PENDING/SUCCESS/
1896 				 * OVERRUN/UNDERRUN
1897 				 */
1898 				if (data_xfer_length) {
1899 					pkt->pkt_state |= STATE_XFERRED_DATA;
1900 					pkt->pkt_resid = acp->bcount - \
1901 					    data_xfer_length;
1902 					ASSERT(pkt->pkt_resid >= 0);
1903 				}
1904 				break;
1905 			case SRB_STATUS_ABORTED:
1906 				AACDB_PRINT(softs, CE_NOTE,
1907 				    "SRB_STATUS_ABORTED, xfer=%d, resid=%d",
1908 				    data_xfer_length, pkt->pkt_resid);
1909 				aac_set_pkt_reason(softs, acp, CMD_ABORTED,
1910 				    STAT_ABORTED);
1911 				break;
1912 			case SRB_STATUS_ABORT_FAILED:
1913 				AACDB_PRINT(softs, CE_NOTE,
1914 				    "SRB_STATUS_ABORT_FAILED, xfer=%d, " \
1915 				    "resid=%d", data_xfer_length,
1916 				    pkt->pkt_resid);
1917 				aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL,
1918 				    0);
1919 				break;
1920 			case SRB_STATUS_PARITY_ERROR:
1921 				AACDB_PRINT(softs, CE_NOTE,
1922 				    "SRB_STATUS_PARITY_ERROR, xfer=%d, " \
1923 				    "resid=%d", data_xfer_length,
1924 				    pkt->pkt_resid);
1925 				aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0);
1926 				break;
1927 			case SRB_STATUS_NO_DEVICE:
1928 			case SRB_STATUS_INVALID_PATH_ID:
1929 			case SRB_STATUS_INVALID_TARGET_ID:
1930 			case SRB_STATUS_INVALID_LUN:
1931 			case SRB_STATUS_SELECTION_TIMEOUT:
1932 #ifdef DEBUG
1933 				if (AAC_DEV_IS_VALID(acp->dvp)) {
1934 					AACDB_PRINT(softs, CE_NOTE,
1935 					    "SRB_STATUS_NO_DEVICE(%d), " \
1936 					    "xfer=%d, resid=%d ",
1937 					    srb_status & 0x3f,
1938 					    data_xfer_length, pkt->pkt_resid);
1939 				}
1940 #endif
1941 				aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0);
1942 				break;
1943 			case SRB_STATUS_COMMAND_TIMEOUT:
1944 			case SRB_STATUS_TIMEOUT:
1945 				AACDB_PRINT(softs, CE_NOTE,
1946 				    "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \
1947 				    "resid=%d", data_xfer_length,
1948 				    pkt->pkt_resid);
1949 				aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
1950 				    STAT_TIMEOUT);
1951 				break;
1952 			case SRB_STATUS_BUS_RESET:
1953 				AACDB_PRINT(softs, CE_NOTE,
1954 				    "SRB_STATUS_BUS_RESET, xfer=%d, " \
1955 				    "resid=%d", data_xfer_length,
1956 				    pkt->pkt_resid);
1957 				aac_set_pkt_reason(softs, acp, CMD_RESET,
1958 				    STAT_BUS_RESET);
1959 				break;
1960 			default:
1961 				AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \
1962 				    "xfer=%d, resid=%d", srb_status & 0x3f,
1963 				    data_xfer_length, pkt->pkt_resid);
1964 				aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1965 				break;
1966 			}
1967 		} else if (scsi_status == STATUS_CHECK) {
1968 			/* CHECK CONDITION */
1969 			struct scsi_arq_status *arqstat =
1970 			    (void *)(pkt->pkt_scbp);
1971 			uint32_t sense_data_size;
1972 
1973 			pkt->pkt_state |= STATE_ARQ_DONE;
1974 
1975 			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1976 			arqstat->sts_rqpkt_reason = CMD_CMPLT;
1977 			arqstat->sts_rqpkt_resid = 0;
1978 			arqstat->sts_rqpkt_state =
1979 			    STATE_GOT_BUS |
1980 			    STATE_GOT_TARGET |
1981 			    STATE_SENT_CMD |
1982 			    STATE_XFERRED_DATA;
1983 			arqstat->sts_rqpkt_statistics = 0;
1984 
1985 			sense_data_size = ddi_get32(acc,
1986 			    &resp->sense_data_size);
1987 			ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE);
1988 			AACDB_PRINT(softs, CE_NOTE,
1989 			    "CHECK CONDITION: sense len=%d, xfer len=%d",
1990 			    sense_data_size, data_xfer_length);
1991 
1992 			if (sense_data_size > SENSE_LENGTH)
1993 				sense_data_size = SENSE_LENGTH;
1994 			ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata,
1995 			    (uint8_t *)resp->sense_data, sense_data_size,
1996 			    DDI_DEV_AUTOINCR);
1997 		} else {
1998 			AACDB_PRINT(softs, CE_WARN, "invaild scsi status: " \
1999 			    "scsi_status=%d, srb_status=%d",
2000 			    scsi_status, srb_status);
2001 			aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
2002 		}
2003 	} else {
2004 		AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d",
2005 		    resp_status);
2006 		aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
2007 	}
2008 }
2009 
2010 /*
2011  * Handle completed IOCTL command
2012  */
2013 /*ARGSUSED*/
2014 void
2015 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2016 {
2017 	struct aac_slot *slotp = acp->slotp;
2018 
2019 	/*
2020 	 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
2021 	 * may wait on softs->event, so use cv_broadcast() instead
2022 	 * of cv_signal().
2023 	 */
2024 	ASSERT(acp->flags & AAC_CMD_SYNC);
2025 	ASSERT(acp->flags & AAC_CMD_NO_CB);
2026 
2027 	/* Get the size of the response FIB from its FIB.Header.Size field */
2028 	acp->fib_size = ddi_get16(slotp->fib_acc_handle,
2029 	    &slotp->fibp->Header.Size);
2030 
2031 	ASSERT(acp->fib_size <= softs->aac_max_fib_size);
2032 	ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
2033 	    (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
2034 }
2035 
2036 /*
2037  * Handle completed sync fib command
2038  */
2039 /*ARGSUSED*/
2040 void
2041 aac_sync_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2042 {
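	/*
	 * Nothing to do here: aac_sync_fib() itself waits for the
	 * AAC_CMD_CMPLT flag, so this completion callback is intentionally
	 * empty.
	 */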
2043 }
2044 
2045 /*
2046  * Handle completed Flush command
2047  */
2048 /*ARGSUSED*/
2049 static void
2050 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2051 {
2052 	struct aac_slot *slotp = acp->slotp;
2053 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
2054 	struct aac_synchronize_reply *resp;
2055 	uint32_t status;
2056 
2057 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
2058 
2059 	acp->pkt->pkt_state |= STATE_GOT_STATUS;
2060 
2061 	resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
2062 	status = ddi_get32(acc, &resp->Status);
2063 	if (status != CT_OK)
2064 		aac_set_arq_data_hwerr(acp);
2065 }
2066 
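/*
 * Handle completed start/stop unit command
 */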
2067 static void
2068 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2069 {
2070 	struct aac_slot *slotp = acp->slotp;
2071 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
2072 	struct aac_Container_resp *resp;
2073 	uint32_t status;
2074 
2075 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
2076 
2077 	acp->pkt->pkt_state |= STATE_GOT_STATUS;
2078 
2079 	resp = (struct aac_Container_resp *)&slotp->fibp->data[0];
2080 	status = ddi_get32(acc, &resp->Status);
2081 	if (status != 0) {
2082 		AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit");
2083 		aac_set_arq_data_hwerr(acp);
2084 	}
2085 }
2086 
2087 /*
2088  * Access PCI space to see if the driver can support the card
2089  */
2090 static int
2091 aac_check_card_type(struct aac_softstate *softs)
2092 {
2093 	ddi_acc_handle_t pci_config_handle;
2094 	int card_index;
2095 	uint32_t pci_cmd;
2096 
2097 	/* Map pci configuration space */
2098 	if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
2099 	    DDI_SUCCESS) {
2100 		AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
2101 		return (AACERR);
2102 	}
2103 
2104 	softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
2105 	softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
2106 	softs->subvendid = pci_config_get16(pci_config_handle,
2107 	    PCI_CONF_SUBVENID);
2108 	softs->subsysid = pci_config_get16(pci_config_handle,
2109 	    PCI_CONF_SUBSYSID);
2110 
2111 	card_index = 0;
2112 	while (!CARD_IS_UNKNOWN(card_index)) {
2113 		if ((aac_cards[card_index].vendor == softs->vendid) &&
2114 		    (aac_cards[card_index].device == softs->devid) &&
2115 		    (aac_cards[card_index].subvendor == softs->subvendid) &&
2116 		    (aac_cards[card_index].subsys == softs->subsysid)) {
2117 			break;
2118 		}
2119 		card_index++;
2120 	}
2121 
2122 	softs->card = card_index;
2123 	softs->hwif = aac_cards[card_index].hwif;
2124 
2125 	/*
2126 	 * Unknown aac card:
2127 	 * do a generic match based on the VendorID and DeviceID to
2128 	 * support the new cards in the aac family
2129 	 */
2130 	if (CARD_IS_UNKNOWN(card_index)) {
2131 		if (softs->vendid != 0x9005) {
2132 			AACDB_PRINT(softs, CE_WARN,
2133 			    "Unknown vendor 0x%x", softs->vendid);
2134 			goto error;
2135 		}
2136 		switch (softs->devid) {
2137 		case 0x285:
2138 			softs->hwif = AAC_HWIF_I960RX;
2139 			break;
2140 		case 0x286:
2141 			softs->hwif = AAC_HWIF_RKT;
2142 			break;
2143 		default:
2144 			AACDB_PRINT(softs, CE_WARN,
2145 			    "Unknown device \"pci9005,%x\"", softs->devid);
2146 			goto error;
2147 		}
2148 	}
2149 
2150 	/* Set hardware dependent interface */
2151 	switch (softs->hwif) {
2152 	case AAC_HWIF_I960RX:
2153 		softs->aac_if = aac_rx_interface;
2154 		softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
2155 		break;
2156 	case AAC_HWIF_RKT:
2157 		softs->aac_if = aac_rkt_interface;
2158 		softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
2159 		break;
2160 	default:
2161 		AACDB_PRINT(softs, CE_WARN,
2162 		    "Unknown hardware interface %d", softs->hwif);
2163 		goto error;
2164 	}
2165 
2166 	/* Set card names */
2167 	(void *)strncpy(softs->vendor_name, aac_cards[card_index].vid,
2168 	    AAC_VENDOR_LEN);
2169 	(void *)strncpy(softs->product_name, aac_cards[card_index].desc,
2170 	    AAC_PRODUCT_LEN);
2171 
2172 	/* Set up quirks */
2173 	softs->flags = aac_cards[card_index].quirks;
2174 
2175 	/* Force the busmaster enable bit on */
2176 	pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2177 	if ((pci_cmd & PCI_COMM_ME) == 0) {
2178 		pci_cmd |= PCI_COMM_ME;
2179 		pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
2180 		pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2181 		if ((pci_cmd & PCI_COMM_ME) == 0) {
2182 			cmn_err(CE_CONT, "?Cannot enable busmaster bit");
2183 			goto error;
2184 		}
2185 	}
2186 
2187 	/* Set memory base to map */
2188 	softs->pci_mem_base_paddr = 0xfffffff0UL & \
2189 	    pci_config_get32(pci_config_handle, PCI_CONF_BASE0);
2190 
2191 	pci_config_teardown(&pci_config_handle);
2192 
2193 	return (AACOK); /* card type detected */
2194 error:
2195 	pci_config_teardown(&pci_config_handle);
2196 	return (AACERR); /* no matched card found */
2197 }
2198 
2199 /*
2200  * Do the usual interrupt handler setup stuff.
2201  */
2202 static int
2203 aac_register_intrs(struct aac_softstate *softs)
2204 {
2205 	dev_info_t *dip;
2206 	int intr_types;
2207 
2208 	ASSERT(softs->devinfo_p);
2209 	dip = softs->devinfo_p;
2210 
2211 	/* Get the type of device interrupts */
2212 	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
2213 		AACDB_PRINT(softs, CE_WARN,
2214 		    "ddi_intr_get_supported_types() failed");
2215 		return (AACERR);
2216 	}
2217 	AACDB_PRINT(softs, CE_NOTE,
2218 	    "ddi_intr_get_supported_types() ret: 0x%x", intr_types);
2219 
2220 	/* Query interrupts, and alloc/init all needed structures */
2221 	if (intr_types & DDI_INTR_TYPE_MSI) {
2222 		if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
2223 		    != DDI_SUCCESS) {
2224 			AACDB_PRINT(softs, CE_WARN,
2225 			    "MSI interrupt query failed");
2226 			return (AACERR);
2227 		}
2228 		softs->intr_type = DDI_INTR_TYPE_MSI;
2229 	} else if (intr_types & DDI_INTR_TYPE_FIXED) {
2230 		if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
2231 		    != DDI_SUCCESS) {
2232 			AACDB_PRINT(softs, CE_WARN,
2233 			    "FIXED interrupt query failed");
2234 			return (AACERR);
2235 		}
2236 		softs->intr_type = DDI_INTR_TYPE_FIXED;
2237 	} else {
2238 		AACDB_PRINT(softs, CE_WARN,
2239 		    "Device cannot suppport both FIXED and MSI interrupts");
2240 		return (AACERR);
2241 	}
2242 
2243 	/* Connect interrupt handlers */
2244 	if (aac_add_intrs(softs) != DDI_SUCCESS) {
2245 		AACDB_PRINT(softs, CE_WARN,
2246 		    "Interrupt registration failed, intr type: %s",
2247 		    softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");
2248 		return (AACERR);
2249 	}
2250 	(void) aac_enable_intrs(softs);
2251 
2252 	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
2253 	    NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
2254 		AACDB_PRINT(softs, CE_WARN,
2255 		    "Can not setup soft interrupt handler!");
2256 		aac_remove_intrs(softs);
2257 		return (AACERR);
2258 	}
2259 
2260 	return (AACOK);
2261 }
2262 
2263 static void
2264 aac_unregister_intrs(struct aac_softstate *softs)
2265 {
2266 	aac_remove_intrs(softs);
2267 	ddi_remove_softintr(softs->softint_id);
2268 }
2269 
2270 /*
2271  * Check the firmware to determine the features to support and the FIB
2272  * parameters to use.
2273  */
2274 static int
2275 aac_check_firmware(struct aac_softstate *softs)
2276 {
2277 	uint32_t options;
2278 	uint32_t atu_size;
2279 	ddi_acc_handle_t pci_handle;
2280 	uint8_t *data;
2281 	uint32_t max_fibs;
2282 	uint32_t max_fib_size;
2283 	uint32_t sg_tablesize;
2284 	uint32_t max_sectors;
2285 	uint32_t status;
2286 
2287 	/* Get supported options */
2288 	if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0,
2289 	    &status)) != AACOK) {
2290 		if (status != SRB_STATUS_INVALID_REQUEST) {
2291 			cmn_err(CE_CONT,
2292 			    "?Fatal error: request adapter info error");
2293 			return (AACERR);
2294 		}
2295 		options = 0;
2296 		atu_size = 0;
2297 	} else {
2298 		options = AAC_MAILBOX_GET(softs, 1);
2299 		atu_size = AAC_MAILBOX_GET(softs, 2);
2300 	}
2301 
2302 	if (softs->state & AAC_STATE_RESET) {
2303 		if ((softs->support_opt == options) &&
2304 		    (softs->atu_size == atu_size))
2305 			return (AACOK);
2306 
2307 		cmn_err(CE_WARN,
2308 		    "?Fatal error: firmware changed, system needs reboot");
2309 		return (AACERR);
2310 	}
2311 
2312 	/*
2313 	 * The following critical settings are initialized only once during
2314 	 * driver attachment.
2315 	 */
2316 	softs->support_opt = options;
2317 	softs->atu_size = atu_size;
2318 
2319 	/* Process supported options */
2320 	if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
2321 	    (softs->flags & AAC_FLAGS_NO4GB) == 0) {
2322 		AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window");
2323 		softs->flags |= AAC_FLAGS_4GB_WINDOW;
2324 	} else {
2325 		/*
2326 		 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
2327 		 * only. IO is handled by the DMA engine which does not suffer
2328 		 * from the ATU window programming workarounds necessary for
2329 		 * CPU copy operations.
2330 		 */
2331 		softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
2332 		softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;
2333 	}
2334 
2335 	if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
2336 		AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address");
2337 		softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
2338 		softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull;
2339 		softs->flags |= AAC_FLAGS_SG_64BIT;
2340 	}
2341 
2342 	if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
2343 		softs->flags |= AAC_FLAGS_ARRAY_64BIT;
2344 		AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size");
2345 	}
2346 
2347 	if (options & AAC_SUPPORTED_NONDASD) {
2348 		if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0,
2349 		    "nondasd-enable", (char **)&data) == DDI_SUCCESS)) {
2350 			if (strcmp((char *)data, "yes") == 0) {
2351 				AACDB_PRINT(softs, CE_NOTE,
2352 				    "!Enable Non-DASD access");
2353 				softs->flags |= AAC_FLAGS_NONDASD;
2354 			}
2355 			ddi_prop_free(data);
2356 		}
2357 	}
2358 
2359 	/* Read preferred settings */
2360 	max_fib_size = 0;
2361 	if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF,
2362 	    0, 0, 0, 0, NULL)) == AACOK) {
2363 		options = AAC_MAILBOX_GET(softs, 1);
2364 		max_fib_size = (options & 0xffff);
2365 		max_sectors = (options >> 16) << 1;
2366 		options = AAC_MAILBOX_GET(softs, 2);
2367 		sg_tablesize = (options >> 16);
2368 		options = AAC_MAILBOX_GET(softs, 3);
2369 		max_fibs = (options & 0xffff);
2370 	}
2371 
2372 	/* Enable new comm. and rawio at the same time */
2373 	if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) &&
2374 	    (max_fib_size != 0)) {
2375 		/* read out and save PCI MBR */
2376 		if ((atu_size > softs->map_size) &&
2377 		    (ddi_regs_map_setup(softs->devinfo_p, 1,
2378 		    (caddr_t *)&data, 0, atu_size, &softs->reg_attr,
2379 		    &pci_handle) == DDI_SUCCESS)) {
2380 			ddi_regs_map_free(&softs->pci_mem_handle);
2381 			softs->pci_mem_handle = pci_handle;
2382 			softs->pci_mem_base_vaddr = data;
2383 			softs->map_size = atu_size;
2384 		}
2385 		if (atu_size == softs->map_size) {
2386 			softs->flags |= AAC_FLAGS_NEW_COMM;
2387 			AACDB_PRINT(softs, CE_NOTE,
2388 			    "!Enable New Comm. interface");
2389 		}
2390 	}
2391 
2392 	/* Set FIB parameters */
2393 	if (softs->flags & AAC_FLAGS_NEW_COMM) {
2394 		softs->aac_max_fibs = max_fibs;
2395 		softs->aac_max_fib_size = max_fib_size;
2396 		softs->aac_max_sectors = max_sectors;
2397 		softs->aac_sg_tablesize = sg_tablesize;
2398 
2399 		softs->flags |= AAC_FLAGS_RAW_IO;
2400 		AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO");
2401 	} else {
2402 		softs->aac_max_fibs =
2403 		    (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512;
2404 		softs->aac_max_fib_size = AAC_FIB_SIZE;
2405 		softs->aac_max_sectors = 128;	/* 64K */
2406 		if (softs->flags & AAC_FLAGS_17SG)
2407 			softs->aac_sg_tablesize = 17;
2408 		else if (softs->flags & AAC_FLAGS_34SG)
2409 			softs->aac_sg_tablesize = 34;
2410 		else if (softs->flags & AAC_FLAGS_SG_64BIT)
2411 			softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2412 			    sizeof (struct aac_blockwrite64) +
2413 			    sizeof (struct aac_sg_entry64)) /
2414 			    sizeof (struct aac_sg_entry64);
2415 		else
2416 			softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2417 			    sizeof (struct aac_blockwrite) +
2418 			    sizeof (struct aac_sg_entry)) /
2419 			    sizeof (struct aac_sg_entry);
2420 	}
2421 
2422 	if ((softs->flags & AAC_FLAGS_RAW_IO) &&
2423 	    (softs->flags & AAC_FLAGS_ARRAY_64BIT)) {
2424 		softs->flags |= AAC_FLAGS_LBA_64BIT;
2425 		AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array");
2426 	}
2427 	softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize;
2428 	softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9;
2429 	/*
2430 	 * 64K maximum segment size in scatter gather list is controlled by
2431 	 * the NEW_COMM bit in the adapter information. If not set, the card
2432 	 * can only accept a maximum of 64K. It is not recommended to permit
2433 	 * more than 128KB of total transfer size to the adapters because
2434 	 * performance is negatively impacted.
2435 	 *
2436 	 * For new comm, segment size equals max xfer size. For old comm,
2437 	 * we use 64K for both.
2438 	 */
2439 	softs->buf_dma_attr.dma_attr_count_max =
2440 	    softs->buf_dma_attr.dma_attr_maxxfer - 1;
2441 
2442 	/* Setup FIB operations */
2443 	if (softs->flags & AAC_FLAGS_RAW_IO)
2444 		softs->aac_cmd_fib = aac_cmd_fib_rawio;
2445 	else if (softs->flags & AAC_FLAGS_SG_64BIT)
2446 		softs->aac_cmd_fib = aac_cmd_fib_brw64;
2447 	else
2448 		softs->aac_cmd_fib = aac_cmd_fib_brw;
2449 	softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \
2450 	    aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32;
2451 
2452 	/* 64-bit LBA needs descriptor format sense data */
2453 	softs->slen = sizeof (struct scsi_arq_status);
2454 	if ((softs->flags & AAC_FLAGS_LBA_64BIT) &&
2455 	    softs->slen < AAC_ARQ64_LENGTH)
2456 		softs->slen = AAC_ARQ64_LENGTH;
2457 
2458 	AACDB_PRINT(softs, CE_NOTE,
2459 	    "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
2460 	    softs->aac_max_fibs, softs->aac_max_fib_size,
2461 	    softs->aac_max_sectors, softs->aac_sg_tablesize);
2462 
2463 	return (AACOK);
2464 }
2465 
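/*
 * Copy a FsaRev revision structure out of the sync FIB (fsarev0) into
 * host memory (fsarev1) using endian-aware accesses
 */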
2466 static void
2467 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0,
2468     struct FsaRev *fsarev1)
2469 {
2470 	ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
2471 
2472 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash);
2473 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type);
2474 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor);
2475 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major);
2476 	AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber);
2477 }
2478 
2479 /*
2480  * The following function comes from Adaptec:
2481  *
2482  * Query adapter information and supplement adapter information
2483  */
2484 static int
2485 aac_get_adapter_info(struct aac_softstate *softs,
2486     struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr)
2487 {
2488 	struct aac_cmd *acp = &softs->sync_ac;
2489 	ddi_acc_handle_t acc;
2490 	struct aac_fib *fibp;
2491 	struct aac_adapter_info *ainfp;
2492 	struct aac_supplement_adapter_info *sinfp;
2493 	int rval;
2494 
2495 	(void) aac_sync_fib_slot_bind(softs, acp);
2496 	acc = acp->slotp->fib_acc_handle;
2497 	fibp = acp->slotp->fibp;
2498 
2499 	ddi_put8(acc, &fibp->data[0], 0);
2500 	if (aac_sync_fib(softs, RequestAdapterInfo,
2501 	    sizeof (struct aac_fib_header)) != AACOK) {
2502 		AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed");
2503 		rval = AACERR;
2504 		goto finish;
2505 	}
2506 	ainfp = (struct aac_adapter_info *)fibp->data;
2507 	if (ainfr) {
2508 		AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2509 		AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase);
2510 		AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture);
2511 		AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant);
2512 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed);
2513 		AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem);
2514 		AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem);
2515 		AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem);
2516 		aac_fsa_rev(softs, &ainfp->KernelRevision,
2517 		    &ainfr->KernelRevision);
2518 		aac_fsa_rev(softs, &ainfp->MonitorRevision,
2519 		    &ainfr->MonitorRevision);
2520 		aac_fsa_rev(softs, &ainfp->HardwareRevision,
2521 		    &ainfr->HardwareRevision);
2522 		aac_fsa_rev(softs, &ainfp->BIOSRevision,
2523 		    &ainfr->BIOSRevision);
2524 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled);
2525 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask);
2526 		AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber);
2527 		AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform);
2528 		AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2529 		AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant);
2530 	}
2531 	if (sinfr) {
2532 		if (!(softs->support_opt &
2533 		    AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) {
2534 			AACDB_PRINT(softs, CE_WARN,
2535 			    "SupplementAdapterInfo not supported");
2536 			rval = AACERR;
2537 			goto finish;
2538 		}
2539 		ddi_put8(acc, &fibp->data[0], 0);
2540 		if (aac_sync_fib(softs, RequestSupplementAdapterInfo,
2541 		    sizeof (struct aac_fib_header)) != AACOK) {
2542 			AACDB_PRINT(softs, CE_WARN,
2543 			    "RequestSupplementAdapterInfo failed");
2544 			rval = AACERR;
2545 			goto finish;
2546 		}
2547 		sinfp = (struct aac_supplement_adapter_info *)fibp->data;
2548 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1);
2549 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2);
2550 		AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize);
2551 		AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId);
2552 		AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts);
2553 		AAC_GET_FIELD32(acc, sinfr, sinfp, Version);
2554 		AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits);
2555 		AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber);
2556 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3);
2557 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12);
2558 		AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts);
2559 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo,
2560 		    sizeof (struct vpd_info));
2561 		aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision,
2562 		    &sinfr->FlashFirmwareRevision);
2563 		AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions);
2564 		aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision,
2565 		    &sinfr->FlashFirmwareBootRevision);
2566 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo,
2567 		    MFG_PCBA_SERIAL_NUMBER_WIDTH);
2568 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0],
2569 		    MFG_WWN_WIDTH);
2570 		AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2);
2571 		AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag);
2572 		if (sinfr->ExpansionFlag == 1) {
2573 			AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3);
2574 			AAC_GET_FIELD32(acc, sinfr, sinfp,
2575 			    SupportedPerformanceMode);
2576 			AAC_REP_GET_FIELD32(acc, sinfr, sinfp,
2577 			    ReservedGrowth[0], 80);
2578 		}
2579 	}
2580 	rval = AACOK;
2581 finish:
2582 	aac_sync_fib_slot_release(softs, acp);
2583 	return (rval);
2584 }
2585 
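/*
 * Query the firmware for the physical bus count and the number of
 * targets per bus
 */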
2586 static int
2587 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max,
2588     uint32_t *tgt_max)
2589 {
2590 	struct aac_cmd *acp = &softs->sync_ac;
2591 	ddi_acc_handle_t acc;
2592 	struct aac_fib *fibp;
2593 	struct aac_ctcfg *c_cmd;
2594 	struct aac_ctcfg_resp *c_resp;
2595 	uint32_t scsi_method_id;
2596 	struct aac_bus_info *cmd;
2597 	struct aac_bus_info_response *resp;
2598 	int rval;
2599 
2600 	(void) aac_sync_fib_slot_bind(softs, acp);
2601 	acc = acp->slotp->fib_acc_handle;
2602 	fibp = acp->slotp->fibp;
2603 
2604 	/* Detect MethodId */
2605 	c_cmd = (struct aac_ctcfg *)&fibp->data[0];
2606 	ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig);
2607 	ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD);
2608 	ddi_put32(acc, &c_cmd->param, 0);
2609 	rval = aac_sync_fib(softs, ContainerCommand,
2610 	    AAC_FIB_SIZEOF(struct aac_ctcfg));
2611 	c_resp = (struct aac_ctcfg_resp *)&fibp->data[0];
2612 	if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) {
2613 		AACDB_PRINT(softs, CE_WARN,
2614 		    "VM_ContainerConfig command fail");
2615 		rval = AACERR;
2616 		goto finish;
2617 	}
2618 	scsi_method_id = ddi_get32(acc, &c_resp->param);
2619 
2620 	/* Detect phys. bus count and max. target id first */
2621 	cmd = (struct aac_bus_info *)&fibp->data[0];
2622 	ddi_put32(acc, &cmd->Command, VM_Ioctl);
2623 	ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */
2624 	ddi_put32(acc, &cmd->MethodId, scsi_method_id);
2625 	ddi_put32(acc, &cmd->ObjectId, 0);
2626 	ddi_put32(acc, &cmd->CtlCmd, GetBusInfo);
2627 	/*
2628 	 * For VM_Ioctl, the firmware uses the Header.Size filled in by the
2629 	 * driver as the size to be returned. Therefore the driver has to use
2630 	 * sizeof (struct aac_bus_info_response) because it is greater than
2631 	 * sizeof (struct aac_bus_info).
2632 	 */
2633 	rval = aac_sync_fib(softs, ContainerCommand,
2634 	    AAC_FIB_SIZEOF(struct aac_bus_info_response));
2635 	resp = (struct aac_bus_info_response *)cmd;
2636 
2637 	/* Scan all coordinates with INQUIRY */
2638 	if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) {
2639 		AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail");
2640 		rval = AACERR;
2641 		goto finish;
2642 	}
2643 	*bus_max = ddi_get32(acc, &resp->BusCount);
2644 	*tgt_max = ddi_get32(acc, &resp->TargetsPerBus);
2645 
2646 finish:
2647 	aac_sync_fib_slot_release(softs, acp);
2648 	return (rval);
2649 }
2650 
2651 /*
2652  * The following function comes from Adaptec:
2653  *
2654  * Routine to be called during initialization of communications with
2655  * the adapter to handle possible adapter configuration issues. When
2656  * the adapter first boots up, it examines attached drives, etc, and
2657  * potentially comes up with a new or revised configuration (relative to
2658  * what's stored in it's NVRAM). Additionally it may discover problems
2659  * that make the current physical configuration unworkable (currently
2660  * applicable only to cluster configuration issues).
2661  *
2662  * If there are no configuration issues or the issues are considered
2663  * trival by the adapter, it will set it's configuration status to
2664  * "FSACT_CONTINUE" and execute the "commit confiuguration" action
2665  * automatically on it's own.
2666  *
2667  * However, if there are non-trivial issues, the adapter will set it's
2668  * internal configuration status to "FSACT_PAUSE" or "FASCT_ABORT"
2669  * and wait for some agent on the host to issue the "\ContainerCommand
2670  * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
2671  * adapter to commit the new/updated configuration and enable
2672  * un-inhibited operation.  The host agent should first issue the
2673  * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
2674  * command to obtain information about config issues detected by
2675  * the adapter.
2676  *
2677  * Normally the adapter's PC BIOS will execute on the host following
2678  * adapter poweron and reset and will be responsible for querring the
2679  * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
2680  * command if appropriate.
2681  *
2682  * However, with the introduction of IOP reset support, the adapter may
2683  * boot up without the benefit of the adapter's PC BIOS host agent.
2684  * This routine is intended to take care of these issues in situations
2685  * where BIOS doesn't execute following adapter poweron or reset.  The
2686  * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
2687  * there is no harm in doing this when it's already been done.
2688  */
2689 static int
2690 aac_handle_adapter_config_issues(struct aac_softstate *softs)
2691 {
2692 	struct aac_cmd *acp = &softs->sync_ac;
2693 	ddi_acc_handle_t acc;
2694 	struct aac_fib *fibp;
2695 	struct aac_Container *cmd;
2696 	struct aac_Container_resp *resp;
2697 	struct aac_cf_status_header *cfg_sts_hdr;
2698 	uint32_t resp_status;
2699 	uint32_t ct_status;
2700 	uint32_t cfg_stat_action;
2701 	int rval;
2702 
2703 	(void) aac_sync_fib_slot_bind(softs, acp);
2704 	acc = acp->slotp->fib_acc_handle;
2705 	fibp = acp->slotp->fibp;
2706 
2707 	/* Get adapter config status */
2708 	cmd = (struct aac_Container *)&fibp->data[0];
2709 
2710 	bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2711 	ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2712 	ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS);
2713 	ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE],
2714 	    sizeof (struct aac_cf_status_header));
2715 	rval = aac_sync_fib(softs, ContainerCommand,
2716 	    AAC_FIB_SIZEOF(struct aac_Container));
2717 	resp = (struct aac_Container_resp *)cmd;
2718 	cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data;
2719 
2720 	resp_status = ddi_get32(acc, &resp->Status);
2721 	ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2722 	if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) {
2723 		cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action);
2724 
2725 		/* Commit configuration if it's reasonable to do so. */
2726 		if (cfg_stat_action <= CFACT_PAUSE) {
2727 			bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2728 			ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2729 			ddi_put32(acc, &cmd->CTCommand.command,
2730 			    CT_COMMIT_CONFIG);
2731 			rval = aac_sync_fib(softs, ContainerCommand,
2732 			    AAC_FIB_SIZEOF(struct aac_Container));
2733 
2734 			resp_status = ddi_get32(acc, &resp->Status);
2735 			ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2736 			if ((rval == AACOK) && (resp_status == 0) &&
2737 			    (ct_status == CT_OK))
2738 				/* Successful completion */
2739 				rval = AACMPE_OK;
2740 			else
2741 				/* Auto-commit aborted due to error(s). */
2742 				rval = AACMPE_COMMIT_CONFIG;
2743 		} else {
2744 			/*
2745 			 * Auto-commit aborted due to adapter indicating
2746 			 * configuration issue(s) too dangerous to auto-commit.
2747 			 */
2748 			rval = AACMPE_CONFIG_STATUS;
2749 		}
2750 	} else {
2751 		cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted");
2752 		rval = AACMPE_CONFIG_STATUS;
2753 	}
2754 
2755 	aac_sync_fib_slot_release(softs, acp);
2756 	return (rval);
2757 }
2758 
2759 /*
2760  * Hardware initialization and resource allocation
2761  */
2762 static int
2763 aac_common_attach(struct aac_softstate *softs)
2764 {
2765 	uint32_t status;
2766 	int i;
2767 
2768 	DBCALLED(softs, 1);
2769 
2770 	/*
2771 	 * Do a little check here to make sure there aren't any outstanding
2772 	 * FIBs in the message queue. At this point there should not be and
2773 	 * if there are, they are probably left over from another instance of
2774 	 * the driver like when the system crashes and the crash dump driver
2775 	 * gets loaded.
2776 	 */
2777 	while (AAC_OUTB_GET(softs) != 0xfffffffful)
2778 		;
2779 
2780 	/*
2781 	 * Wait for the card to complete booting up before doing anything
2782 	 * that attempts to communicate with it.
2783 	 */
2784 	status = AAC_FWSTATUS_GET(softs);
2785 	if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC)
2786 		goto error;
2787 	i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */
2788 	AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i);
2789 	if (i == 0) {
2790 		cmn_err(CE_CONT, "?Fatal error: controller not ready");
2791 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2792 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2793 		goto error;
2794 	}
2795 
2796 	/* Read and set card supported options and settings */
2797 	if (aac_check_firmware(softs) == AACERR) {
2798 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2799 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2800 		goto error;
2801 	}
2802 
2803 	/* Add interrupt handlers */
2804 	if (aac_register_intrs(softs) == AACERR) {
2805 		cmn_err(CE_CONT,
2806 		    "?Fatal error: interrupts register failed");
2807 		goto error;
2808 	}
2809 
2810 	/* Setup communication space with the card */
2811 	if (softs->comm_space_dma_handle == NULL) {
2812 		if (aac_alloc_comm_space(softs) != AACOK)
2813 			goto error;
2814 	}
2815 	if (aac_setup_comm_space(softs) != AACOK) {
2816 		cmn_err(CE_CONT, "?Setup communication space failed");
2817 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2818 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2819 		goto error;
2820 	}
2821 
2822 #ifdef DEBUG
2823 	if (aac_get_fw_debug_buffer(softs) != AACOK)
2824 		cmn_err(CE_CONT, "?firmware UART trace not supported");
2825 #endif
2826 
2827 	/* Allocate slots */
2828 	if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) {
2829 		cmn_err(CE_CONT, "?Fatal error: slots allocate failed");
2830 		goto error;
2831 	}
2832 	AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots);
2833 
2834 	/* Allocate FIBs */
2835 	if (softs->total_fibs < softs->total_slots) {
2836 		aac_alloc_fibs(softs);
2837 		if (softs->total_fibs == 0)
2838 			goto error;
2839 		AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated",
2840 		    softs->total_fibs);
2841 	}
2842 
2843 	AAC_STATUS_CLR(softs, ~0); /* Clear out all interrupts */
2844 	AAC_ENABLE_INTR(softs); /* Enable the interrupts we can handle */
2845 
2846 	/* Get adapter names */
2847 	if (CARD_IS_UNKNOWN(softs->card)) {
2848 		struct aac_supplement_adapter_info sinf;
2849 
2850 		if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) {
2851 			cmn_err(CE_CONT, "?Query adapter information failed");
2852 		} else {
2853 			softs->feature_bits = sinf.FeatureBits;
2854 			softs->support_opt2 = sinf.SupportedOptions2;
2855 
2856 			char *p, *p0, *p1;
2857 
2858 			/*
2859 			 * Now find the controller name in supp_adapter_info->
2860 			 * AdapterTypeText. Use the first word as the vendor
2861 			 * and the other words as the product name.
2862 			 */
2863 			AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = "
2864 			    "\"%s\"", sinf.AdapterTypeText);
2865 			p = sinf.AdapterTypeText;
2866 			p0 = p1 = NULL;
2867 			/* Skip heading spaces */
2868 			while (*p && (*p == ' ' || *p == '\t'))
2869 				p++;
2870 			p0 = p;
2871 			while (*p && (*p != ' ' && *p != '\t'))
2872 				p++;
2873 			/* Remove middle spaces */
2874 			while (*p && (*p == ' ' || *p == '\t'))
2875 				*p++ = 0;
2876 			p1 = p;
2877 			/* Remove trailing spaces */
2878 			p = p1 + strlen(p1) - 1;
2879 			while (p > p1 && (*p == ' ' || *p == '\t'))
2880 				*p-- = 0;
2881 			if (*p0 && *p1) {
2882 				(void *)strncpy(softs->vendor_name, p0,
2883 				    AAC_VENDOR_LEN);
2884 				(void *)strncpy(softs->product_name, p1,
2885 				    AAC_PRODUCT_LEN);
2886 			} else {
2887 				cmn_err(CE_WARN,
2888 				    "?adapter name mis-formatted\n");
2889 				if (*p0)
2890 					(void *)strncpy(softs->product_name,
2891 					    p0, AAC_PRODUCT_LEN);
2892 			}
2893 		}
2894 	}
2895 
2896 	cmn_err(CE_NOTE,
2897 	    "!aac driver %d.%02d.%02d-%d, found card: " \
2898 	    "%s %s(pci0x%x.%x.%x.%x) at 0x%x",
2899 	    AAC_DRIVER_MAJOR_VERSION,
2900 	    AAC_DRIVER_MINOR_VERSION,
2901 	    AAC_DRIVER_BUGFIX_LEVEL,
2902 	    AAC_DRIVER_BUILD,
2903 	    softs->vendor_name, softs->product_name,
2904 	    softs->vendid, softs->devid, softs->subvendid, softs->subsysid,
2905 	    softs->pci_mem_base_paddr);
2906 
2907 	/* Perform acceptance of adapter-detected config changes if possible */
2908 	if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) {
2909 		cmn_err(CE_CONT, "?Handle adapter config issues failed");
2910 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2911 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2912 		goto error;
2913 	}
2914 
2915 	/* Setup containers (logical devices) */
2916 	if (aac_probe_containers(softs) != AACOK) {
2917 		cmn_err(CE_CONT, "?Fatal error: get container info error");
2918 		goto error;
2919 	}
2920 
2921 	/* Setup phys. devices */
2922 	if (softs->flags & AAC_FLAGS_NONDASD) {
2923 		uint32_t bus_max, tgt_max;
2924 		uint32_t bus, tgt;
2925 		int index;
2926 
2927 		if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) {
2928 			cmn_err(CE_CONT, "?Fatal error: get bus info error");
2929 			goto error;
2930 		}
2931 		AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d",
2932 		    bus_max, tgt_max);
2933 		if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) {
2934 			if (softs->state & AAC_STATE_RESET) {
2935 				cmn_err(CE_WARN,
2936 				    "?Fatal error: bus map changed");
2937 				goto error;
2938 			}
2939 			softs->bus_max = bus_max;
2940 			softs->tgt_max = tgt_max;
2941 			if (softs->nondasds) {
2942 				kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2943 				    sizeof (struct aac_nondasd));
2944 			}
2945 			softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \
2946 			    sizeof (struct aac_nondasd), KM_SLEEP);
2947 
2948 			index = 0;
2949 			for (bus = 0; bus < softs->bus_max; bus++) {
2950 				for (tgt = 0; tgt < softs->tgt_max; tgt++) {
2951 					struct aac_nondasd *dvp =
2952 					    &softs->nondasds[index++];
2953 					dvp->dev.type = AAC_DEV_PD;
2954 					dvp->bus = bus;
2955 					dvp->tid = tgt;
2956 				}
2957 			}
2958 		}
2959 	}
2960 
2961 	/* Check dma & acc handles allocated in attach */
2962 	if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) {
2963 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2964 		goto error;
2965 	}
2966 
2967 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
2968 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2969 		goto error;
2970 	}
2971 
2972 	for (i = 0; i < softs->total_slots; i++) {
2973 		if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) !=
2974 		    DDI_SUCCESS) {
2975 			ddi_fm_service_impact(softs->devinfo_p,
2976 			    DDI_SERVICE_LOST);
2977 			goto error;
2978 		}
2979 	}
2980 
2981 	return (AACOK);
2982 error:
2983 	if (softs->state & AAC_STATE_RESET)
2984 		return (AACERR);
2985 	if (softs->nondasds) {
2986 		kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2987 		    sizeof (struct aac_nondasd));
2988 		softs->nondasds = NULL;
2989 	}
2990 	if (softs->total_fibs > 0)
2991 		aac_destroy_fibs(softs);
2992 	if (softs->total_slots > 0)
2993 		aac_destroy_slots(softs);
2994 	if (softs->comm_space_dma_handle)
2995 		aac_free_comm_space(softs);
2996 	return (AACERR);
2997 }
2998 
2999 /*
3000  * Hardware shutdown and resource release
3001  */
3002 static void
3003 aac_common_detach(struct aac_softstate *softs)
3004 {
3005 	DBCALLED(softs, 1);
3006 
3007 	aac_unregister_intrs(softs);
3008 
3009 	mutex_enter(&softs->io_lock);
3010 	(void) aac_shutdown(softs);
3011 
3012 	if (softs->nondasds) {
3013 		kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
3014 		    sizeof (struct aac_nondasd));
3015 		softs->nondasds = NULL;
3016 	}
3017 	aac_destroy_fibs(softs);
3018 	aac_destroy_slots(softs);
3019 	aac_free_comm_space(softs);
3020 	mutex_exit(&softs->io_lock);
3021 }
3022 
3023 /*
3024  * Send a synchronous command to the controller and wait for a result.
3025  * Indicate if the controller completed the command with an error status.
3026  */
3027 int
3028 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd,
3029     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3,
3030     uint32_t *statusp)
3031 {
3032 	int timeout;
3033 	uint32_t status;
3034 
3035 	if (statusp != NULL)
3036 		*statusp = SRB_STATUS_SUCCESS;
3037 
3038 	/* Fill in mailbox */
3039 	AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3);
3040 
3041 	/* Ensure the sync command doorbell flag is cleared */
3042 	AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
3043 
3044 	/* Then set it to signal the adapter */
3045 	AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND);
3046 
3047 	/* Spin waiting for the command to complete */
3048 	timeout = AAC_IMMEDIATE_TIMEOUT * 1000;
3049 	AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout);
3050 	if (!timeout) {
3051 		AACDB_PRINT(softs, CE_WARN,
3052 		    "Sync command timed out after %d seconds (0x%x)!",
3053 		    AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs));
3054 		return (AACERR);
3055 	}
3056 
3057 	/* Clear the completion flag */
3058 	AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
3059 
3060 	/* Get the command status */
3061 	status = AAC_MAILBOX_GET(softs, 0);
3062 	if (statusp != NULL)
3063 		*statusp = status;
3064 	if (status != SRB_STATUS_SUCCESS) {
3065 		AACDB_PRINT(softs, CE_WARN,
3066 		    "Sync command fail: status = 0x%x", status);
3067 		return (AACERR);
3068 	}
3069 
3070 	return (AACOK);
3071 }
3072 
3073 /*
3074  * Send a synchronous FIB to the adapter and wait for its completion
3075  */
3076 static int
3077 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize)
3078 {
3079 	struct aac_cmd *acp = &softs->sync_ac;
3080 
3081 	acp->flags = AAC_CMD_NO_CB | AAC_CMD_SYNC | AAC_CMD_IN_SYNC_SLOT;
3082 	acp->ac_comp = aac_sync_complete;
3083 	acp->timeout = AAC_SYNC_TIMEOUT;
3084 
3085 	acp->fib_size = fibsize;
3086 	/*
3087 	 * Only need to set up the sync FIB header; the caller should have
3088 	 * initialized the FIB data
3089 	 */
3090 	aac_cmd_fib_header(softs, acp, cmd);
3091 
3092 	aac_start_io(softs, acp);
3093 
3094 	/* Check if acp completed in case the FIB send failed */
3095 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
3096 		cv_wait(&softs->event, &softs->io_lock);
3097 
3098 	if (acp->flags & AAC_CMD_CMPLT)
3099 		return (AACOK);
3100 	return (AACERR);
3101 }
3102 
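/*
 * Initialize a cmd queue: an empty queue has q_head set to NULL and
 * q_tail pointing back at q_head, so enqueue needs no empty-queue check
 */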
3103 static void
3104 aac_cmd_initq(struct aac_cmd_queue *q)
3105 {
3106 	q->q_head = NULL;
3107 	q->q_tail = (struct aac_cmd *)&q->q_head;
3108 }
3109 
3110 /*
3111  * Remove a cmd from the head of q
3112  */
3113 static struct aac_cmd *
3114 aac_cmd_dequeue(struct aac_cmd_queue *q)
3115 {
3116 	struct aac_cmd *acp;
3117 
3118 	_NOTE(ASSUMING_PROTECTED(*q))
3119 
3120 	if ((acp = q->q_head) != NULL) {
3121 		if ((q->q_head = acp->next) != NULL)
3122 			acp->next = NULL;
3123 		else
3124 			q->q_tail = (struct aac_cmd *)&q->q_head;
3125 		acp->prev = NULL;
3126 	}
3127 	return (acp);
3128 }
3129 
3130 /*
3131  * Add a cmd to the tail of q
3132  */
3133 static void
3134 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp)
3135 {
3136 	ASSERT(acp->next == NULL);
3137 	acp->prev = q->q_tail;
3138 	q->q_tail->next = acp;
3139 	q->q_tail = acp;
3140 }
3141 
3142 /*
3143  * Remove the cmd ac from q
3144  */
3145 static void
3146 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp)
3147 {
3148 	if (acp->prev) {
3149 		if ((acp->prev->next = acp->next) != NULL) {
3150 			acp->next->prev = acp->prev;
3151 			acp->next = NULL;
3152 		} else {
3153 			q->q_tail = acp->prev;
3154 		}
3155 		acp->prev = NULL;
3156 	}
3157 	/* ac is not in the queue */
3158 }
3159 
3160 /*
3161  * Atomically insert an entry into the nominated queue; returns AACOK on
3162  * success or AACERR if the queue is full.
3163  *
3164  * Note: it would be more efficient to defer notifying the controller in
3165  *	 the case where we may be inserting several entries in rapid succession,
3166  *	 but implementing this usefully may be difficult (it would involve a
3167  *	 separate queue/notify interface).
3168  */
3169 static int
3170 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr,
3171     uint32_t fib_size)
3172 {
3173 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3174 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3175 	uint32_t pi, ci;
3176 
3177 	DBCALLED(softs, 2);
3178 
3179 	ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q);
3180 
3181 	/* Get the producer/consumer indices */
3182 	(void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3183 	    (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3184 	    DDI_DMA_SYNC_FORCPU);
3185 	if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3186 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3187 		return (AACERR);
3188 	}
3189 
3190 	pi = ddi_get32(acc,
3191 	    &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3192 	ci = ddi_get32(acc,
3193 	    &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3194 
3195 	/*
3196 	 * Wrap the queue first before we check the queue to see
3197 	 * if it is full
3198 	 */
3199 	if (pi >= aac_qinfo[queue].size)
3200 		pi = 0;
3201 
3202 	/* XXX queue full */
3203 	if ((pi + 1) == ci)
3204 		return (AACERR);
3205 
3206 	/* Fill in queue entry */
3207 	ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size);
3208 	ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr);
3209 	(void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3210 	    (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3211 	    DDI_DMA_SYNC_FORDEV);
3212 
3213 	/* Update producer index */
3214 	ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX],
3215 	    pi + 1);
3216 	(void) ddi_dma_sync(dma,
3217 	    (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \
3218 	    (uintptr_t)softs->comm_space, sizeof (uint32_t),
3219 	    DDI_DMA_SYNC_FORDEV);
3220 
3221 	if (aac_qinfo[queue].notify != 0)
3222 		AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3223 	return (AACOK);
3224 }
3225 
3226 /*
3227  * Atomically remove one entry from the nominated queue; returns AACOK on
3228  * success or AACERR if the queue is empty.
3229  */
3230 static int
3231 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp)
3232 {
3233 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3234 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3235 	uint32_t pi, ci;
3236 	int unfull = 0;
3237 
3238 	DBCALLED(softs, 2);
3239 
3240 	ASSERT(idxp);
3241 
3242 	/* Get the producer/consumer indices */
3243 	(void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3244 	    (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3245 	    DDI_DMA_SYNC_FORCPU);
3246 	pi = ddi_get32(acc,
3247 	    &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3248 	ci = ddi_get32(acc,
3249 	    &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3250 
3251 	/* Check for queue empty */
3252 	if (ci == pi)
3253 		return (AACERR);
3254 
3255 	if (pi >= aac_qinfo[queue].size)
3256 		pi = 0;
3257 
3258 	/* Check for queue full */
3259 	if (ci == pi + 1)
3260 		unfull = 1;
3261 
3262 	/*
3263 	 * The controller does not wrap the queue,
3264 	 * so we have to do it by ourselves
3265 	 */
3266 	if (ci >= aac_qinfo[queue].size)
3267 		ci = 0;
3268 
3269 	/* Fetch the entry */
3270 	(void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3271 	    (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3272 	    DDI_DMA_SYNC_FORCPU);
3273 	if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3274 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3275 		return (AACERR);
3276 	}
3277 
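	/*
	 * For host response queues the aq_fib_addr field is returned as is;
	 * for host command queues it is a byte offset that is converted
	 * to a FIB index by dividing by AAC_FIB_SIZE.
	 */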
3278 	switch (queue) {
3279 	case AAC_HOST_NORM_RESP_Q:
3280 	case AAC_HOST_HIGH_RESP_Q:
3281 		*idxp = ddi_get32(acc,
3282 		    &(softs->qentries[queue] + ci)->aq_fib_addr);
3283 		break;
3284 
3285 	case AAC_HOST_NORM_CMD_Q:
3286 	case AAC_HOST_HIGH_CMD_Q:
3287 		*idxp = ddi_get32(acc,
3288 		    &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE;
3289 		break;
3290 
3291 	default:
3292 		cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()");
3293 		return (AACERR);
3294 	}
3295 
3296 	/* Update consumer index */
3297 	ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX],
3298 	    ci + 1);
3299 	(void) ddi_dma_sync(dma,
3300 	    (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \
3301 	    (uintptr_t)softs->comm_space, sizeof (uint32_t),
3302 	    DDI_DMA_SYNC_FORDEV);
3303 
3304 	if (unfull && aac_qinfo[queue].notify != 0)
3305 		AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3306 	return (AACOK);
3307 }
3308 
3309 static struct aac_mntinforesp *
3310 aac_get_mntinfo(struct aac_softstate *softs, int cid)
3311 {
3312 	ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3313 	struct aac_fib *fibp = softs->sync_ac.slotp->fibp;
3314 	struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0];
3315 	struct aac_mntinforesp *mir;
3316 
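	/*
	 * Build a VM_NameServe (or VM_NameServe64 when 64-bit LBA is
	 * enabled) request in the sync FIB data area; on success the
	 * response is read back from the same buffer.
	 */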
3317 	ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */
3318 	    (softs->flags & AAC_FLAGS_LBA_64BIT) ?
3319 	    VM_NameServe64 : VM_NameServe);
3320 	ddi_put32(acc, &mi->MntType, FT_FILESYS);
3321 	ddi_put32(acc, &mi->MntCount, cid);
3322 
3323 	if (aac_sync_fib(softs, ContainerCommand,
3324 	    AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) {
3325 		AACDB_PRINT(softs, CE_WARN, "Error probing container %d", cid);
3326 		return (NULL);
3327 	}
3328 
3329 	mir = (struct aac_mntinforesp *)&fibp->data[0];
3330 	if (ddi_get32(acc, &mir->Status) == ST_OK)
3331 		return (mir);
3332 	return (NULL);
3333 }
3334 
3335 static int
3336 aac_get_container_count(struct aac_softstate *softs, int *count)
3337 {
3338 	ddi_acc_handle_t acc;
3339 	struct aac_mntinforesp *mir;
3340 	int rval;
3341 
3342 	(void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
3343 	acc = softs->sync_ac.slotp->fib_acc_handle;
3344 
3345 	if ((mir = aac_get_mntinfo(softs, 0)) == NULL) {
3346 		rval = AACERR;
3347 		goto finish;
3348 	}
3349 	*count = ddi_get32(acc, &mir->MntRespCount);
3350 	if (*count > AAC_MAX_LD) {
3351 		AACDB_PRINT(softs, CE_CONT,
3352 		    "container count(%d) > AAC_MAX_LD", *count);
3353 		rval = AACERR;
3354 		goto finish;
3355 	}
3356 	rval = AACOK;
3357 
3358 finish:
3359 	aac_sync_fib_slot_release(softs, &softs->sync_ac);
3360 	return (rval);
3361 }
3362 
3363 static int
3364 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid)
3365 {
3366 	ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3367 	struct aac_Container *ct = (struct aac_Container *) \
3368 	    &softs->sync_ac.slotp->fibp->data[0];
3369 
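	/*
	 * Issue a CT_CID_TO_32BITS_UID container command: param[0] carries
	 * the container id on input and the CT status on output, while
	 * param[1] returns the 32-bit uid.
	 */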
3370 	bzero(ct, sizeof (*ct) - CT_PACKET_SIZE);
3371 	ddi_put32(acc, &ct->Command, VM_ContainerConfig);
3372 	ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID);
3373 	ddi_put32(acc, &ct->CTCommand.param[0], cid);
3374 
3375 	if (aac_sync_fib(softs, ContainerCommand,
3376 	    AAC_FIB_SIZEOF(struct aac_Container)) == AACERR)
3377 		return (AACERR);
3378 	if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK)
3379 		return (AACERR);
3380 
3381 	*uid = ddi_get32(acc, &ct->CTCommand.param[1]);
3382 	return (AACOK);
3383 }
3384 
3385 /*
3386  * Request information of the container cid
3387  */
3388 static struct aac_mntinforesp *
3389 aac_get_container_info(struct aac_softstate *softs, int cid)
3390 {
3391 	ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3392 	struct aac_mntinforesp *mir;
3393 	int rval_uid;
3394 	uint32_t uid;
3395 
3396 	/* Get container UID first so that it will not overwrite mntinfo */
3397 	rval_uid = aac_get_container_uid(softs, cid, &uid);
3398 
3399 	/* Get container basic info */
3400 	if ((mir = aac_get_mntinfo(softs, cid)) == NULL) {
3401 		AACDB_PRINT(softs, CE_CONT,
3402 		    "query container %d info failed", cid);
3403 		return (NULL);
3404 	}
3405 	if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE)
3406 		return (mir);
3407 	if (rval_uid != AACOK) {
3408 		AACDB_PRINT(softs, CE_CONT,
3409 		    "query container %d uid failed", cid);
3410 		return (NULL);
3411 	}
3412 
3413 	ddi_put32(acc, &mir->Status, uid);
3414 	return (mir);
3415 }
3416 
3417 static int
3418 aac_probe_container(struct aac_softstate *softs, uint32_t cid)
3419 {
3420 	struct aac_container *dvp = &softs->containers[cid];
3421 	ddi_acc_handle_t acc;
3422 	struct aac_mntinforesp *mir;
3423 	uint64_t size;
3424 	uint32_t uid;
3425 	int rval;
3426 
3427 	(void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
3428 	acc = softs->sync_ac.slotp->fib_acc_handle;
3429 
3430 	/* Get container basic info */
3431 	if ((mir = aac_get_container_info(softs, cid)) == NULL) {
3432 		rval = AACERR;
3433 		goto finish;
3434 	}
3435 
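	/*
	 * A volume type of CT_NONE means the container no longer exists:
	 * mark a previously known container offline and post a DR event.
	 * Otherwise record a new container, or update the uid/size of an
	 * existing one.
	 */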
3436 	if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) {
3437 		if (AAC_DEV_IS_VALID(&dvp->dev)) {
3438 			AACDB_PRINT(softs, CE_NOTE,
3439 			    ">>> Container %d deleted", cid);
3440 			dvp->dev.flags &= ~AAC_DFLAG_VALID;
3441 			(void) aac_dr_event(softs, dvp->cid, -1,
3442 			    AAC_EVT_OFFLINE);
3443 		}
3444 	} else {
3445 		size = AAC_MIR_SIZE(softs, acc, mir);
3446 		uid = ddi_get32(acc, &mir->Status);
3447 		if (AAC_DEV_IS_VALID(&dvp->dev)) {
3448 			if (dvp->uid != uid) {
3449 				AACDB_PRINT(softs, CE_WARN,
3450 				    ">>> Container %u uid changed to %d",
3451 				    cid, uid);
3452 				dvp->uid = uid;
3453 			}
3454 			if (dvp->size != size) {
3455 				AACDB_PRINT(softs, CE_NOTE,
3456 				    ">>> Container %u size changed to %"PRIu64,
3457 				    cid, size);
3458 				dvp->size = size;
3459 			}
3460 		} else { /* Init new container */
3461 			AACDB_PRINT(softs, CE_NOTE,
3462 			    ">>> Container %d added: " \
3463 			    "size=0x%x.%08x, type=%d, name=%s",
3464 			    cid,
3465 			    ddi_get32(acc, &mir->MntObj.CapacityHigh),
3466 			    ddi_get32(acc, &mir->MntObj.Capacity),
3467 			    ddi_get32(acc, &mir->MntObj.VolType),
3468 			    mir->MntObj.FileSystemName);
3469 			dvp->dev.flags |= AAC_DFLAG_VALID;
3470 			dvp->dev.type = AAC_DEV_LD;
3471 
3472 			dvp->cid = cid;
3473 			dvp->uid = uid;
3474 			dvp->size = size;
3475 			dvp->locked = 0;
3476 			dvp->deleted = 0;
3477 			(void) aac_dr_event(softs, dvp->cid, -1,
3478 			    AAC_EVT_ONLINE);
3479 		}
3480 	}
3481 	rval = AACOK;
3482 
3483 finish:
3484 	aac_sync_fib_slot_release(softs, &softs->sync_ac);
3485 	return (rval);
3486 }
3487 
3488 /*
3489  * Do a rescan of all the possible containers and update the container list
3490  * with newly online/offline containers, and prepare for autoconfiguration.
3491  */
3492 static int
3493 aac_probe_containers(struct aac_softstate *softs)
3494 {
3495 	int i, count, total;
3496 
3497 	/* Loop over possible containers */
3498 	count = softs->container_count;
3499 	if (aac_get_container_count(softs, &count) == AACERR)
3500 		return (AACERR);
3501 	for (i = total = 0; i < count; i++) {
3502 		if (aac_probe_container(softs, i) == AACOK)
3503 			total++;
3504 	}
3505 	if (count < softs->container_count) {
3506 		struct aac_container *dvp;
3507 
3508 		for (dvp = &softs->containers[count];
3509 		    dvp < &softs->containers[softs->container_count]; dvp++) {
3510 			if (!AAC_DEV_IS_VALID(&dvp->dev))
3511 				continue;
3512 			AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted",
3513 			    dvp->cid);
3514 			dvp->dev.flags &= ~AAC_DFLAG_VALID;
3515 			(void) aac_dr_event(softs, dvp->cid, -1,
3516 			    AAC_EVT_OFFLINE);
3517 		}
3518 	}
3519 	softs->container_count = count;
3520 	AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total);
3521 	return (AACOK);
3522 }
3523 
3524 static int
3525 aac_alloc_comm_space(struct aac_softstate *softs)
3526 {
3527 	size_t rlen;
3528 	ddi_dma_cookie_t cookie;
3529 	uint_t cookien;
3530 
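	/*
	 * The communication area is set up with the usual three DDI DMA
	 * steps: allocate a DMA handle, allocate consistent memory for
	 * struct aac_comm_space, and bind the memory to obtain the
	 * physical address that is later handed to the adapter.
	 */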
3531 	/* Allocate DMA for comm. space */
3532 	if (ddi_dma_alloc_handle(
3533 	    softs->devinfo_p,
3534 	    &softs->addr_dma_attr,
3535 	    DDI_DMA_SLEEP,
3536 	    NULL,
3537 	    &softs->comm_space_dma_handle) != DDI_SUCCESS) {
3538 		AACDB_PRINT(softs, CE_WARN,
3539 		    "Cannot alloc dma handle for communication area");
3540 		goto error;
3541 	}
3542 	if (ddi_dma_mem_alloc(
3543 	    softs->comm_space_dma_handle,
3544 	    sizeof (struct aac_comm_space),
3545 	    &softs->acc_attr,
3546 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3547 	    DDI_DMA_SLEEP,
3548 	    NULL,
3549 	    (caddr_t *)&softs->comm_space,
3550 	    &rlen,
3551 	    &softs->comm_space_acc_handle) != DDI_SUCCESS) {
3552 		AACDB_PRINT(softs, CE_WARN,
3553 		    "Cannot alloc mem for communication area");
3554 		goto error;
3555 	}
3556 	if (ddi_dma_addr_bind_handle(
3557 	    softs->comm_space_dma_handle,
3558 	    NULL,
3559 	    (caddr_t)softs->comm_space,
3560 	    sizeof (struct aac_comm_space),
3561 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3562 	    DDI_DMA_SLEEP,
3563 	    NULL,
3564 	    &cookie,
3565 	    &cookien) != DDI_DMA_MAPPED) {
3566 		AACDB_PRINT(softs, CE_WARN,
3567 		    "DMA bind failed for communication area");
3568 		goto error;
3569 	}
3570 	softs->comm_space_phyaddr = cookie.dmac_address;
3571 
3572 	return (AACOK);
3573 error:
3574 	if (softs->comm_space_acc_handle) {
3575 		ddi_dma_mem_free(&softs->comm_space_acc_handle);
3576 		softs->comm_space_acc_handle = NULL;
3577 	}
3578 	if (softs->comm_space_dma_handle) {
3579 		ddi_dma_free_handle(&softs->comm_space_dma_handle);
3580 		softs->comm_space_dma_handle = NULL;
3581 	}
3582 	return (AACERR);
3583 }
3584 
3585 static void
3586 aac_free_comm_space(struct aac_softstate *softs)
3587 {
3588 
3589 	(void) ddi_dma_unbind_handle(softs->comm_space_dma_handle);
3590 	ddi_dma_mem_free(&softs->comm_space_acc_handle);
3591 	softs->comm_space_acc_handle = NULL;
3592 	ddi_dma_free_handle(&softs->comm_space_dma_handle);
3593 	softs->comm_space_dma_handle = NULL;
3594 	softs->comm_space_phyaddr = NULL;
3595 }
3596 
3597 /*
3598  * Initialize the data structures that are required for the communication
3599  * interface to operate
3600  */
3601 static int
3602 aac_setup_comm_space(struct aac_softstate *softs)
3603 {
3604 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3605 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3606 	uint32_t comm_space_phyaddr;
3607 	struct aac_adapter_init *initp;
3608 	int qoffset;
3609 
3610 	comm_space_phyaddr = softs->comm_space_phyaddr;
3611 
3612 	/* Setup adapter init struct */
3613 	initp = &softs->comm_space->init_data;
3614 	bzero(initp, sizeof (struct aac_adapter_init));
3615 
3616 	ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION);
3617 	ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time());
3618 
3619 	/* Setup new/old comm. specific data */
3620 	if (softs->flags & AAC_FLAGS_RAW_IO) {
3621 		uint32_t init_flags = 0;
3622 
3623 		if (softs->flags & AAC_FLAGS_NEW_COMM)
3624 			init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED;
3625 		/* AAC_SUPPORTED_POWER_MANAGEMENT */
3626 		init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM;
3627 		init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME;
3628 
3629 		ddi_put32(acc, &initp->InitStructRevision,
3630 		    AAC_INIT_STRUCT_REVISION_4);
3631 		ddi_put32(acc, &initp->InitFlags, init_flags);
3632 		/* Setup the preferred settings */
3633 		ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs);
3634 		ddi_put32(acc, &initp->MaxIoSize,
3635 		    (softs->aac_max_sectors << 9));
3636 		ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size);
3637 	} else {
3638 		/*
3639 		 * Tells the adapter about the physical location of various
3640 		 * important shared data structures
3641 		 */
3642 		ddi_put32(acc, &initp->AdapterFibsPhysicalAddress,
3643 		    comm_space_phyaddr + \
3644 		    offsetof(struct aac_comm_space, adapter_fibs));
3645 		ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0);
3646 		ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE);
3647 		ddi_put32(acc, &initp->AdapterFibsSize,
3648 		    AAC_ADAPTER_FIBS * AAC_FIB_SIZE);
3649 		ddi_put32(acc, &initp->PrintfBufferAddress,
3650 		    comm_space_phyaddr + \
3651 		    offsetof(struct aac_comm_space, adapter_print_buf));
3652 		ddi_put32(acc, &initp->PrintfBufferSize,
3653 		    AAC_ADAPTER_PRINT_BUFSIZE);
3654 		ddi_put32(acc, &initp->MiniPortRevision,
3655 		    AAC_INIT_STRUCT_MINIPORT_REVISION);
3656 		ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN);
3657 
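		/*
		 * The queue table must start on an AAC_QUEUE_ALIGN boundary,
		 * so compute the padding from the natural offset of qtable
		 * within the comm space and report the aligned physical
		 * address to the adapter via CommHeaderAddress.
		 */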
3658 		qoffset = (comm_space_phyaddr + \
3659 		    offsetof(struct aac_comm_space, qtable)) % \
3660 		    AAC_QUEUE_ALIGN;
3661 		if (qoffset)
3662 			qoffset = AAC_QUEUE_ALIGN - qoffset;
3663 		softs->qtablep = (struct aac_queue_table *) \
3664 		    ((char *)&softs->comm_space->qtable + qoffset);
3665 		ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \
3666 		    offsetof(struct aac_comm_space, qtable) + qoffset);
3667 
3668 		/* Init queue table */
3669 		ddi_put32(acc, &softs->qtablep-> \
3670 		    qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3671 		    AAC_HOST_NORM_CMD_ENTRIES);
3672 		ddi_put32(acc, &softs->qtablep-> \
3673 		    qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3674 		    AAC_HOST_NORM_CMD_ENTRIES);
3675 		ddi_put32(acc, &softs->qtablep-> \
3676 		    qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3677 		    AAC_HOST_HIGH_CMD_ENTRIES);
3678 		ddi_put32(acc, &softs->qtablep-> \
3679 		    qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3680 		    AAC_HOST_HIGH_CMD_ENTRIES);
3681 		ddi_put32(acc, &softs->qtablep-> \
3682 		    qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3683 		    AAC_ADAP_NORM_CMD_ENTRIES);
3684 		ddi_put32(acc, &softs->qtablep-> \
3685 		    qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3686 		    AAC_ADAP_NORM_CMD_ENTRIES);
3687 		ddi_put32(acc, &softs->qtablep-> \
3688 		    qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3689 		    AAC_ADAP_HIGH_CMD_ENTRIES);
3690 		ddi_put32(acc, &softs->qtablep-> \
3691 		    qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3692 		    AAC_ADAP_HIGH_CMD_ENTRIES);
3693 		ddi_put32(acc, &softs->qtablep-> \
3694 		    qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3695 		    AAC_HOST_NORM_RESP_ENTRIES);
3696 		ddi_put32(acc, &softs->qtablep-> \
3697 		    qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3698 		    AAC_HOST_NORM_RESP_ENTRIES);
3699 		ddi_put32(acc, &softs->qtablep-> \
3700 		    qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3701 		    AAC_HOST_HIGH_RESP_ENTRIES);
3702 		ddi_put32(acc, &softs->qtablep-> \
3703 		    qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3704 		    AAC_HOST_HIGH_RESP_ENTRIES);
3705 		ddi_put32(acc, &softs->qtablep-> \
3706 		    qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3707 		    AAC_ADAP_NORM_RESP_ENTRIES);
3708 		ddi_put32(acc, &softs->qtablep-> \
3709 		    qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3710 		    AAC_ADAP_NORM_RESP_ENTRIES);
3711 		ddi_put32(acc, &softs->qtablep-> \
3712 		    qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3713 		    AAC_ADAP_HIGH_RESP_ENTRIES);
3714 		ddi_put32(acc, &softs->qtablep-> \
3715 		    qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3716 		    AAC_ADAP_HIGH_RESP_ENTRIES);
3717 
3718 		/* Init queue entries */
3719 		softs->qentries[AAC_HOST_NORM_CMD_Q] =
3720 		    &softs->qtablep->qt_HostNormCmdQueue[0];
3721 		softs->qentries[AAC_HOST_HIGH_CMD_Q] =
3722 		    &softs->qtablep->qt_HostHighCmdQueue[0];
3723 		softs->qentries[AAC_ADAP_NORM_CMD_Q] =
3724 		    &softs->qtablep->qt_AdapNormCmdQueue[0];
3725 		softs->qentries[AAC_ADAP_HIGH_CMD_Q] =
3726 		    &softs->qtablep->qt_AdapHighCmdQueue[0];
3727 		softs->qentries[AAC_HOST_NORM_RESP_Q] =
3728 		    &softs->qtablep->qt_HostNormRespQueue[0];
3729 		softs->qentries[AAC_HOST_HIGH_RESP_Q] =
3730 		    &softs->qtablep->qt_HostHighRespQueue[0];
3731 		softs->qentries[AAC_ADAP_NORM_RESP_Q] =
3732 		    &softs->qtablep->qt_AdapNormRespQueue[0];
3733 		softs->qentries[AAC_ADAP_HIGH_RESP_Q] =
3734 		    &softs->qtablep->qt_AdapHighRespQueue[0];
3735 	}
3736 	(void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV);
3737 
3738 	/* Send init structure to the card */
3739 	if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT,
3740 	    comm_space_phyaddr + \
3741 	    offsetof(struct aac_comm_space, init_data),
3742 	    0, 0, 0, NULL) == AACERR) {
3743 		AACDB_PRINT(softs, CE_WARN,
3744 		    "Cannot send init structure to adapter");
3745 		return (AACERR);
3746 	}
3747 
3748 	return (AACOK);
3749 }
3750 
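/*
 * Helpers that space-pad the vendor and product strings to the fixed
 * INQUIRY field widths and return a pointer just past the field, so that
 * callers can chain them when building INQUIRY and VPD data.
 */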
3751 static uchar_t *
3752 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf)
3753 {
3754 	(void) memset(buf, ' ', AAC_VENDOR_LEN);
3755 	bcopy(softs->vendor_name, buf, strlen(softs->vendor_name));
3756 	return (buf + AAC_VENDOR_LEN);
3757 }
3758 
3759 static uchar_t *
3760 aac_product_id(struct aac_softstate *softs, uchar_t *buf)
3761 {
3762 	(void) memset(buf, ' ', AAC_PRODUCT_LEN);
3763 	bcopy(softs->product_name, buf, strlen(softs->product_name));
3764 	return (buf + AAC_PRODUCT_LEN);
3765 }
3766 
3767 /*
3768  * Construct unit serial number from container uid
3769  */
3770 static uchar_t *
3771 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf)
3772 {
3773 	int i, d;
3774 	uint32_t uid;
3775 
3776 	ASSERT(tgt >= 0 && tgt < AAC_MAX_LD);
3777 
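	/*
	 * Convert the 32-bit container uid into eight uppercase hex
	 * digits, most significant nibble first, e.g. a uid of 0x00ABCDEF
	 * becomes "00ABCDEF".
	 */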
3778 	uid = softs->containers[tgt].uid;
3779 	for (i = 7; i >= 0; i--) {
3780 		d = uid & 0xf;
3781 		buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d;
3782 		uid >>= 4;
3783 	}
3784 	return (buf + 8);
3785 }
3786 
3787 /*
3788  * SPC-3 7.5 INQUIRY command implementation
3789  */
3790 static void
3791 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt,
3792     union scsi_cdb *cdbp, struct buf *bp)
3793 {
3794 	int tgt = pkt->pkt_address.a_target;
3795 	char *b_addr = NULL;
3796 	uchar_t page = cdbp->cdb_opaque[2];
3797 
3798 	if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) {
3799 		/* Command Support Data is not supported */
3800 		aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0);
3801 		return;
3802 	}
3803 
3804 	if (bp && bp->b_un.b_addr && bp->b_bcount) {
3805 		if (bp->b_flags & (B_PHYS | B_PAGEIO))
3806 			bp_mapin(bp);
3807 		b_addr = bp->b_un.b_addr;
3808 	}
3809 
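	/*
	 * With EVPD set, emulate the VPD pages locally: 0x00 (supported
	 * pages), 0x80 (unit serial number) and 0x83 (device
	 * identification). Otherwise return standard INQUIRY data.
	 */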
3810 	if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) {
3811 		uchar_t *vpdp = (uchar_t *)b_addr;
3812 		uchar_t *idp, *sp;
3813 
3814 		/* SPC-3 8.4 Vital product data parameters */
3815 		switch (page) {
3816 		case 0x00:
3817 			/* Supported VPD pages */
3818 			if (vpdp == NULL ||
3819 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 3))
3820 				return;
3821 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3822 			vpdp[AAC_VPD_PAGE_CODE] = 0x00;
3823 			vpdp[AAC_VPD_PAGE_LENGTH] = 3;
3824 
3825 			vpdp[AAC_VPD_PAGE_DATA] = 0x00;
3826 			vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80;
3827 			vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83;
3828 
3829 			pkt->pkt_state |= STATE_XFERRED_DATA;
3830 			break;
3831 
3832 		case 0x80:
3833 			/* Unit serial number page */
3834 			if (vpdp == NULL ||
3835 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 8))
3836 				return;
3837 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3838 			vpdp[AAC_VPD_PAGE_CODE] = 0x80;
3839 			vpdp[AAC_VPD_PAGE_LENGTH] = 8;
3840 
3841 			sp = &vpdp[AAC_VPD_PAGE_DATA];
3842 			(void) aac_lun_serialno(softs, tgt, sp);
3843 
3844 			pkt->pkt_state |= STATE_XFERRED_DATA;
3845 			break;
3846 
3847 		case 0x83:
3848 			/* Device identification page */
3849 			if (vpdp == NULL ||
3850 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 32))
3851 				return;
3852 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3853 			vpdp[AAC_VPD_PAGE_CODE] = 0x83;
3854 
3855 			idp = &vpdp[AAC_VPD_PAGE_DATA];
3856 			bzero(idp, AAC_VPD_ID_LENGTH);
3857 			idp[AAC_VPD_ID_CODESET] = 0x02;
3858 			idp[AAC_VPD_ID_TYPE] = 0x01;
3859 
3860 			/*
3861 			 * SPC-3 Table 111 - Identifier type
3862 			 * One recommended method of constructing the remainder
3863 			 * of the identifier field is to concatenate the product
3864 			 * identification field from the standard INQUIRY data
3865 			 * field and the product serial number field from the
3866 			 * unit serial number page.
3867 			 */
3868 			sp = &idp[AAC_VPD_ID_DATA];
3869 			sp = aac_vendor_id(softs, sp);
3870 			sp = aac_product_id(softs, sp);
3871 			sp = aac_lun_serialno(softs, tgt, sp);
3872 			idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \
3873 			    (uintptr_t)&idp[AAC_VPD_ID_DATA];
3874 
3875 			vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \
3876 			    (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA];
3877 			pkt->pkt_state |= STATE_XFERRED_DATA;
3878 			break;
3879 
3880 		default:
3881 			aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3882 			    0x24, 0x00, 0);
3883 			break;
3884 		}
3885 	} else {
3886 		struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr;
3887 		size_t len = sizeof (struct scsi_inquiry);
3888 
3889 		if (page != 0) {
3890 			aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3891 			    0x24, 0x00, 0);
3892 			return;
3893 		}
3894 		if (inqp == NULL || bp->b_bcount < len)
3895 			return;
3896 
3897 		bzero(inqp, len);
3898 		inqp->inq_len = AAC_ADDITIONAL_LEN;
3899 		inqp->inq_ansi = AAC_ANSI_VER;
3900 		inqp->inq_rdf = AAC_RESP_DATA_FORMAT;
3901 		(void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid);
3902 		(void) aac_product_id(softs, (uchar_t *)inqp->inq_pid);
3903 		bcopy("V1.0", inqp->inq_revision, 4);
3904 		inqp->inq_cmdque = 1; /* enable tagged-queuing */
3905 		/*
3906 		 * For the "sd-max-xfer-size" property, which may impact
3907 		 * performance as the number of IO threads increases.
3908 		 */
3909 		inqp->inq_wbus32 = 1;
3910 
3911 		pkt->pkt_state |= STATE_XFERRED_DATA;
3912 	}
3913 }
3914 
3915 /*
3916  * SPC-3 7.10 MODE SENSE command implementation
3917  */
3918 static void
3919 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt,
3920     union scsi_cdb *cdbp, struct buf *bp, int capacity)
3921 {
3922 	uchar_t pagecode;
3923 	struct mode_header *headerp;
3924 	struct mode_header_g1 *g1_headerp;
3925 	unsigned int ncyl;
3926 	caddr_t sense_data;
3927 	caddr_t next_page;
3928 	size_t sdata_size;
3929 	size_t pages_size;
3930 	int unsupport_page = 0;
3931 
3932 	ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE ||
3933 	    cdbp->scc_cmd == SCMD_MODE_SENSE_G1);
3934 
3935 	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
3936 		return;
3937 
3938 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
3939 		bp_mapin(bp);
3940 	pkt->pkt_state |= STATE_XFERRED_DATA;
3941 	pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F;
3942 
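	/*
	 * The reply is staged in a temporary buffer: size the mode header
	 * plus the requested page(s) first, fill them in, and finally copy
	 * out no more than bp->b_bcount bytes.
	 */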
3943 	/* calculate the size of needed buffer */
3944 	if (cdbp->scc_cmd == SCMD_MODE_SENSE)
3945 		sdata_size = MODE_HEADER_LENGTH;
3946 	else /* must be SCMD_MODE_SENSE_G1 */
3947 		sdata_size = MODE_HEADER_LENGTH_G1;
3948 
3949 	pages_size = 0;
3950 	switch (pagecode) {
3951 	case SD_MODE_SENSE_PAGE3_CODE:
3952 		pages_size += sizeof (struct mode_format);
3953 		break;
3954 
3955 	case SD_MODE_SENSE_PAGE4_CODE:
3956 		pages_size += sizeof (struct mode_geometry);
3957 		break;
3958 
3959 	case MODEPAGE_CTRL_MODE:
3960 		if (softs->flags & AAC_FLAGS_LBA_64BIT) {
3961 			pages_size += sizeof (struct mode_control_scsi3);
3962 		} else {
3963 			unsupport_page = 1;
3964 		}
3965 		break;
3966 
3967 	case MODEPAGE_ALLPAGES:
3968 		if (softs->flags & AAC_FLAGS_LBA_64BIT) {
3969 			pages_size += sizeof (struct mode_format) +
3970 			    sizeof (struct mode_geometry) +
3971 			    sizeof (struct mode_control_scsi3);
3972 		} else {
3973 			pages_size += sizeof (struct mode_format) +
3974 			    sizeof (struct mode_geometry);
3975 		}
3976 		break;
3977 
3978 	default:
3979 		/* unsupported pages */
3980 		unsupport_page = 1;
3981 	}
3982 
3983 	/* allocate buffer to fill the send data */
3984 	sdata_size += pages_size;
3985 	sense_data = kmem_zalloc(sdata_size, KM_SLEEP);
3986 
3987 	if (cdbp->scc_cmd == SCMD_MODE_SENSE) {
3988 		headerp = (struct mode_header *)sense_data;
3989 		headerp->length = MODE_HEADER_LENGTH + pages_size -
3990 		    sizeof (headerp->length);
3991 		headerp->bdesc_length = 0;
3992 		next_page = sense_data + sizeof (struct mode_header);
3993 	} else {
3994 		g1_headerp = (void *)sense_data;
3995 		g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size -
3996 		    sizeof (g1_headerp->length));
3997 		g1_headerp->bdesc_length = 0;
3998 		next_page = sense_data + sizeof (struct mode_header_g1);
3999 	}
4000 
4001 	if (unsupport_page)
4002 		goto finish;
4003 
4004 	if (pagecode == SD_MODE_SENSE_PAGE3_CODE ||
4005 	    pagecode == MODEPAGE_ALLPAGES) {
4006 		/* SBC-3 7.1.3.3 Format device page */
4007 		struct mode_format *page3p;
4008 
4009 		page3p = (void *)next_page;
4010 		page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE;
4011 		page3p->mode_page.length = sizeof (struct mode_format);
4012 		page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE);
4013 		page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK);
4014 
4015 		next_page += sizeof (struct mode_format);
4016 	}
4017 
4018 	if (pagecode == SD_MODE_SENSE_PAGE4_CODE ||
4019 	    pagecode == MODEPAGE_ALLPAGES) {
4020 		/* SBC-3 7.1.3.8 Rigid disk device geometry page */
4021 		struct mode_geometry *page4p;
4022 
4023 		page4p = (void *)next_page;
4024 		page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE;
4025 		page4p->mode_page.length = sizeof (struct mode_geometry);
4026 		page4p->heads = AAC_NUMBER_OF_HEADS;
4027 		page4p->rpm = BE_16(AAC_ROTATION_SPEED);
4028 		ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK);
4029 		page4p->cyl_lb = ncyl & 0xff;
4030 		page4p->cyl_mb = (ncyl >> 8) & 0xff;
4031 		page4p->cyl_ub = (ncyl >> 16) & 0xff;
4032 
4033 		next_page += sizeof (struct mode_geometry);
4034 	}
4035 
4036 	if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) &&
4037 	    softs->flags & AAC_FLAGS_LBA_64BIT) {
4038 		/* 64-bit LBA needs large sense data */
4039 		struct mode_control_scsi3 *mctl;
4040 
4041 		mctl = (void *)next_page;
4042 		mctl->mode_page.code = MODEPAGE_CTRL_MODE;
4043 		mctl->mode_page.length =
4044 		    sizeof (struct mode_control_scsi3) -
4045 		    sizeof (struct mode_page);
4046 		mctl->d_sense = 1;
4047 	}
4048 
4049 finish:
4050 	/* copyout the valid data. */
4051 	bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount));
4052 	kmem_free(sense_data, sdata_size);
4053 }
4054 
4055 static int
4056 aac_name_node(dev_info_t *dip, char *name, int len)
4057 {
4058 	int tgt, lun;
4059 
4060 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4061 	    DDI_PROP_DONTPASS, "target", -1);
4062 	if (tgt == -1)
4063 		return (DDI_FAILURE);
4064 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4065 	    DDI_PROP_DONTPASS, "lun", -1);
4066 	if (lun == -1)
4067 		return (DDI_FAILURE);
4068 
4069 	(void) snprintf(name, len, "%x,%x", tgt, lun);
4070 	return (DDI_SUCCESS);
4071 }
4072 
4073 /*ARGSUSED*/
4074 static int
4075 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
4076     scsi_hba_tran_t *tran, struct scsi_device *sd)
4077 {
4078 	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
4079 #if defined(DEBUG) || defined(__lock_lint)
4080 	int ctl = ddi_get_instance(softs->devinfo_p);
4081 #endif
4082 	uint16_t tgt = sd->sd_address.a_target;
4083 	uint8_t lun = sd->sd_address.a_lun;
4084 	struct aac_device *dvp;
4085 
4086 	DBCALLED(softs, 2);
4087 
4088 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
4089 		/*
4090 		 * If no persistent node exists, we don't allow a .conf node
4091 		 * to be created.
4092 		 */
4093 		if (aac_find_child(softs, tgt, lun) != NULL) {
4094 			if (ndi_merge_node(tgt_dip, aac_name_node) !=
4095 			    DDI_SUCCESS)
4096 				/* Create this .conf node */
4097 				return (DDI_SUCCESS);
4098 		}
4099 		return (DDI_FAILURE);
4100 	}
4101 
4102 	/*
4103 	 * Only support containers/phys. devices that have been
4104 	 * detected and are valid
4105 	 */
4106 	mutex_enter(&softs->io_lock);
4107 	if (tgt >= AAC_MAX_DEV(softs)) {
4108 		AACDB_PRINT_TRAN(softs,
4109 		    "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun);
4110 		mutex_exit(&softs->io_lock);
4111 		return (DDI_FAILURE);
4112 	}
4113 
4114 	if (tgt < AAC_MAX_LD) {
4115 		dvp = (struct aac_device *)&softs->containers[tgt];
4116 		if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) {
4117 			AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d",
4118 			    ctl, tgt, lun);
4119 			mutex_exit(&softs->io_lock);
4120 			return (DDI_FAILURE);
4121 		}
4122 		/*
4123 		 * Save the tgt_dip for the given target if one doesn't exist
4124 		 * already. Dips for non-existent tgts will be cleared in
4125 		 * tgt_free.
4126 		 */
4127 		if (softs->containers[tgt].dev.dip == NULL &&
4128 		    strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
4129 			softs->containers[tgt].dev.dip = tgt_dip;
4130 	} else {
4131 		dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)];
4132 	}
4133 
4134 	if (softs->flags & AAC_FLAGS_BRKUP) {
4135 		if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
4136 		    "buf_break", 1) != DDI_PROP_SUCCESS) {
4137 			cmn_err(CE_CONT, "unable to create "
4138 			    "property for t%dL%d (buf_break)", tgt, lun);
4139 		}
4140 	}
4141 
4142 	AACDB_PRINT(softs, CE_NOTE,
4143 	    "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun,
4144 	    (dvp->type == AAC_DEV_PD) ? "pd" : "ld");
4145 	mutex_exit(&softs->io_lock);
4146 	return (DDI_SUCCESS);
4147 }
4148 
4149 static void
4150 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
4151     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
4152 {
4153 #ifndef __lock_lint
4154 	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran))
4155 #endif
4156 
4157 	struct aac_softstate *softs = SD2AAC(sd);
4158 	int tgt = sd->sd_address.a_target;
4159 
4160 	mutex_enter(&softs->io_lock);
4161 	if (tgt < AAC_MAX_LD) {
4162 		if (softs->containers[tgt].dev.dip == tgt_dip)
4163 			softs->containers[tgt].dev.dip = NULL;
4164 	} else {
4165 		softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID;
4166 	}
4167 	mutex_exit(&softs->io_lock);
4168 }
4169 
4170 /*
4171  * Check if the firmware is Up And Running. If it is in the Kernel Panic
4172  * state, (BlinkLED code + 1) is returned.
4173  *    0 -- firmware up and running
4174  *   -1 -- firmware dead
4175  *   >0 -- firmware kernel panic
4176  */
4177 static int
4178 aac_check_adapter_health(struct aac_softstate *softs)
4179 {
4180 	int rval;
4181 
4182 	rval = PCI_MEM_GET32(softs, AAC_OMR0);
4183 
4184 	if (rval & AAC_KERNEL_UP_AND_RUNNING) {
4185 		rval = 0;
4186 	} else if (rval & AAC_KERNEL_PANIC) {
4187 		cmn_err(CE_WARN, "firmware panic");
4188 		rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */
4189 	} else {
4190 		cmn_err(CE_WARN, "firmware dead");
4191 		rval = -1;
4192 	}
4193 	return (rval);
4194 }
4195 
4196 static void
4197 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp,
4198     uchar_t reason)
4199 {
4200 	acp->flags |= AAC_CMD_ABORT;
4201 
4202 	if (acp->pkt) {
4203 		if (acp->slotp) { /* outstanding cmd */
4204 			acp->pkt->pkt_state |= STATE_GOT_STATUS;
4205 		}
4206 
4207 		switch (reason) {
4208 		case CMD_TIMEOUT:
4209 			AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p",
4210 			    acp);
4211 			aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
4212 			    STAT_TIMEOUT | STAT_BUS_RESET);
4213 			break;
4214 		case CMD_RESET:
4215 			/* aac supports only RESET_ALL */
4216 			AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp);
4217 			aac_set_pkt_reason(softs, acp, CMD_RESET,
4218 			    STAT_BUS_RESET);
4219 			break;
4220 		case CMD_ABORTED:
4221 			AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p",
4222 			    acp);
4223 			aac_set_pkt_reason(softs, acp, CMD_ABORTED,
4224 			    STAT_ABORTED);
4225 			break;
4226 		}
4227 	}
4228 	aac_end_io(softs, acp);
4229 }
4230 
4231 /*
4232  * Abort all the pending commands of type iocmd or just the command pkt
4233  * corresponding to pkt
4234  */
4235 static void
4236 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt,
4237     int reason)
4238 {
4239 	struct aac_cmd *ac_arg, *acp;
4240 	int i;
4241 
4242 	if (pkt == NULL) {
4243 		ac_arg = NULL;
4244 	} else {
4245 		ac_arg = PKT2AC(pkt);
4246 		iocmd = (ac_arg->flags & AAC_CMD_SYNC) ?
4247 		    AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC;
4248 	}
4249 
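	/*
	 * iocmd is a bit mask of command classes to abort; when a specific
	 * pkt is supplied, the class is derived from the command's SYNC
	 * flag instead.
	 */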
4250 	/*
4251 	 * a) outstanding commands on the controller
4252 	 * Note: outstanding commands should be aborted only after an
4253 	 * IOP reset has been done.
4254 	 */
4255 	if (iocmd & AAC_IOCMD_OUTSTANDING) {
4256 		struct aac_cmd *acp;
4257 
4258 		for (i = 0; i < AAC_MAX_LD; i++) {
4259 			if (AAC_DEV_IS_VALID(&softs->containers[i].dev))
4260 				softs->containers[i].reset = 1;
4261 		}
4262 		while ((acp = softs->q_busy.q_head) != NULL)
4263 			aac_abort_iocmd(softs, acp, reason);
4264 	}
4265 
4266 	/* b) commands in the waiting queues */
4267 	for (i = 0; i < AAC_CMDQ_NUM; i++) {
4268 		if (iocmd & (1 << i)) {
4269 			if (ac_arg) {
4270 				aac_abort_iocmd(softs, ac_arg, reason);
4271 			} else {
4272 				while ((acp = softs->q_wait[i].q_head) != NULL)
4273 					aac_abort_iocmd(softs, acp, reason);
4274 			}
4275 		}
4276 	}
4277 }
4278 
4279 /*
4280  * The draining thread is shared among quiesce threads. It terminates
4281  * when the adapter is quiesced or stopped by aac_stop_drain().
4282  */
4283 static void
4284 aac_check_drain(void *arg)
4285 {
4286 	struct aac_softstate *softs = arg;
4287 
4288 	mutex_enter(&softs->io_lock);
4289 	if (softs->ndrains) {
4290 		softs->drain_timeid = 0;
4291 		/*
4292 		 * If both the ASYNC and SYNC bus throttles are held,
4293 		 * wake up threads only when both are drained out.
4294 		 */
4295 		if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 ||
4296 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) &&
4297 		    (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 ||
4298 		    softs->bus_ncmds[AAC_CMDQ_SYNC] == 0))
4299 			cv_broadcast(&softs->drain_cv);
4300 		else
4301 			softs->drain_timeid = timeout(aac_check_drain, softs,
4302 			    AAC_QUIESCE_TICK * drv_usectohz(1000000));
4303 	}
4304 	mutex_exit(&softs->io_lock);
4305 }
4306 
4307 /*
4308  * If not draining the outstanding cmds, drain them. Otherwise,
4309  * only update ndrains.
4310  */
4311 static void
4312 aac_start_drain(struct aac_softstate *softs)
4313 {
4314 	if (softs->ndrains == 0) {
4315 		ASSERT(softs->drain_timeid == 0);
4316 		softs->drain_timeid = timeout(aac_check_drain, softs,
4317 		    AAC_QUIESCE_TICK * drv_usectohz(1000000));
4318 	}
4319 	softs->ndrains++;
4320 }
4321 
4322 /*
4323  * Stop the draining thread when no other threads use it any longer.
4324  * Side effect: io_lock may be released in the middle.
4325  */
4326 static void
4327 aac_stop_drain(struct aac_softstate *softs)
4328 {
4329 	softs->ndrains--;
4330 	if (softs->ndrains == 0) {
4331 		if (softs->drain_timeid != 0) {
4332 			timeout_id_t tid = softs->drain_timeid;
4333 
4334 			softs->drain_timeid = 0;
4335 			mutex_exit(&softs->io_lock);
4336 			(void) untimeout(tid);
4337 			mutex_enter(&softs->io_lock);
4338 		}
4339 	}
4340 }
4341 
4342 /*
4343  * The following function comes from Adaptec:
4344  *
4345  * Once an IOP reset is done, the driver basically has to re-initialize the
4346  * card as if coming up from a cold boot, and it is responsible for any IO
4347  * that was outstanding to the adapter at the time of the IOP RESET. Prepare
4348  * for the IOP RESET by making the init code modular, with the ability to
4349  * call it from multiple places.
4350  */
4351 static int
4352 aac_reset_adapter(struct aac_softstate *softs)
4353 {
4354 	int health;
4355 	uint32_t status;
4356 	int rval = AAC_IOP_RESET_FAILED;
4357 
4358 	DBCALLED(softs, 1);
4359 
4360 	ASSERT(softs->state & AAC_STATE_RESET);
4361 
4362 	ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0);
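	/*
	 * Reset sequence: disable interrupts, flush the drives if the
	 * firmware is still healthy, then issue AAC_IOP_RESET through a
	 * synchronous mailbox command. If the mailbox reset is rejected,
	 * either perform the Sunrise Lake dual-core reset or pause
	 * container IO and give up; on success re-run the common attach
	 * code to renegotiate FIB parameters with the firmware.
	 */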
4363 	/* Disable interrupt */
4364 	AAC_DISABLE_INTR(softs);
4365 
4366 	health = aac_check_adapter_health(softs);
4367 	if (health == -1) {
4368 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
4369 		goto finish;
4370 	}
4371 	if (health == 0) /* flush drives if possible */
4372 		(void) aac_shutdown(softs);
4373 
4374 	/* Execute IOP reset */
4375 	if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0,
4376 	    &status)) != AACOK) {
4377 		ddi_acc_handle_t acc;
4378 		struct aac_fib *fibp;
4379 		struct aac_pause_command *pc;
4380 
4381 		if ((status & 0xf) == 0xf) {
4382 			uint32_t wait_count;
4383 
4384 			/*
4385 			 * Sunrise Lake has dual cores and we must drag the
4386 			 * other core with us to reset simultaneously. There
4387 			 * are 2 bits in the Inbound Reset Control and Status
4388 			 * Register (offset 0x38) of the Sunrise Lake to reset
4389 			 * the chip without clearing out the PCI configuration
4390 			 * info (COMMAND & BARS).
4391 			 */
4392 			PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST);
4393 
4394 			/*
4395 			 * We need to wait for 5 seconds before accessing the MU
4396 			 * again: 10000 * 100us = 1s, so 5 * 10000 loops give 5s
4397 			 */
4398 			wait_count = 5 * 10000;
4399 			while (wait_count) {
4400 				drv_usecwait(100); /* delay 100 microseconds */
4401 				wait_count--;
4402 			}
4403 		} else {
4404 			if (status == SRB_STATUS_INVALID_REQUEST)
4405 				cmn_err(CE_WARN, "!IOP_RESET not supported");
4406 			else /* probably timeout */
4407 				cmn_err(CE_WARN, "!IOP_RESET failed");
4408 
4409 			/* Unwind aac_shutdown() */
4410 			(void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
4411 			acc = softs->sync_ac.slotp->fib_acc_handle;
4412 
4413 			fibp = softs->sync_ac.slotp->fibp;
4414 			pc = (struct aac_pause_command *)&fibp->data[0];
4415 
4416 			bzero(pc, sizeof (*pc));
4417 			ddi_put32(acc, &pc->Command, VM_ContainerConfig);
4418 			ddi_put32(acc, &pc->Type, CT_PAUSE_IO);
4419 			ddi_put32(acc, &pc->Timeout, 1);
4420 			ddi_put32(acc, &pc->Min, 1);
4421 			ddi_put32(acc, &pc->NoRescan, 1);
4422 
4423 			(void) aac_sync_fib(softs, ContainerCommand,
4424 			    AAC_FIB_SIZEOF(struct aac_pause_command));
4425 			aac_sync_fib_slot_release(softs, &softs->sync_ac);
4426 
4427 			if (aac_check_adapter_health(softs) != 0)
4428 				ddi_fm_service_impact(softs->devinfo_p,
4429 				    DDI_SERVICE_LOST);
4430 			else
4431 				/*
4432 				 * IOP reset not supported or IOP not reseted
4433 				 * IOP reset not supported or IOP not reset
4434 				rval = AAC_IOP_RESET_ABNORMAL;
4435 			goto finish;
4436 		}
4437 	}
4438 
4439 	/*
4440 	 * Re-read and renegotiate the FIB parameters, as one of the actions
4441 	 * that can result from an IOP reset is the running of a new firmware
4442 	 * image.
4443 	 */
4444 	if (aac_common_attach(softs) != AACOK)
4445 		goto finish;
4446 
4447 	rval = AAC_IOP_RESET_SUCCEED;
4448 
4449 finish:
4450 	AAC_ENABLE_INTR(softs);
4451 	return (rval);
4452 }
4453 
4454 static void
4455 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q,
4456     int throttle)
4457 {
4458 	/*
4459 	 * If the bus is draining/quiesced, no changes to the throttles
4460 	 * are allowed. All throttles should have been set to 0.
4461 	 */
4462 	if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains)
4463 		return;
4464 	dvp->throttle[q] = throttle;
4465 }
4466 
4467 static void
4468 aac_hold_bus(struct aac_softstate *softs, int iocmds)
4469 {
4470 	int i, q;
4471 
4472 	/* Hold bus by holding every device on the bus */
4473 	for (q = 0; q < AAC_CMDQ_NUM; q++) {
4474 		if (iocmds & (1 << q)) {
4475 			softs->bus_throttle[q] = 0;
4476 			for (i = 0; i < AAC_MAX_LD; i++)
4477 				aac_set_throttle(softs,
4478 				    &softs->containers[i].dev, q, 0);
4479 			for (i = 0; i < AAC_MAX_PD(softs); i++)
4480 				aac_set_throttle(softs,
4481 				    &softs->nondasds[i].dev, q, 0);
4482 		}
4483 	}
4484 }
4485 
4486 static void
4487 aac_unhold_bus(struct aac_softstate *softs, int iocmds)
4488 {
4489 	int i, q, max_throttle;
4490 
4491 	for (q = 0; q < AAC_CMDQ_NUM; q++) {
4492 		if (iocmds & (1 << q)) {
4493 			/*
4494 			 * Do not unhold the AAC_IOCMD_ASYNC bus if it has been
4495 			 * quiesced or is being drained, possibly by some quiesce
4496 			 * threads.
4497 			 */
4498 			if (q == AAC_CMDQ_ASYNC && ((softs->state &
4499 			    AAC_STATE_QUIESCED) || softs->ndrains))
4500 				continue;
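			/*
			 * Restore the full throttle. The async queue keeps
			 * AAC_MGT_SLOT_NUM slots in reserve (presumably for
			 * management FIBs); the sync queue keeps one slot.
			 */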
4501 			if (q == AAC_CMDQ_ASYNC)
4502 				max_throttle = softs->total_slots -
4503 				    AAC_MGT_SLOT_NUM;
4504 			else
4505 				max_throttle = softs->total_slots - 1;
4506 			softs->bus_throttle[q] = max_throttle;
4507 			for (i = 0; i < AAC_MAX_LD; i++)
4508 				aac_set_throttle(softs,
4509 				    &softs->containers[i].dev,
4510 				    q, max_throttle);
4511 			for (i = 0; i < AAC_MAX_PD(softs); i++)
4512 				aac_set_throttle(softs, &softs->nondasds[i].dev,
4513 				    q, max_throttle);
4514 		}
4515 	}
4516 }
4517 
4518 static int
4519 aac_do_reset(struct aac_softstate *softs)
4520 {
4521 	int health;
4522 	int rval;
4523 
4524 	softs->state |= AAC_STATE_RESET;
4525 	health = aac_check_adapter_health(softs);
4526 
4527 	/*
4528 	 * Hold off new io commands and wait for all outstanding io
4529 	 * commands to complete.
4530 	 */
4531 	if (health == 0) {
4532 		int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC];
4533 		int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC];
4534 
4535 		if (sync_cmds == 0 && async_cmds == 0) {
4536 			rval = AAC_IOP_RESET_SUCCEED;
4537 			goto finish;
4538 		}
4539 		/*
4540 		 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds
4541 		 * to complete the outstanding io commands
4542 		 */
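		/*
		 * Each polling pass below waits 100us, so 1000 * 10 passes
		 * make up one second and the timeout therefore expresses
		 * AAC_QUIESCE_TIMEOUT in seconds.
		 */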
4543 		int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10;
4544 		int (*intr_handler)(struct aac_softstate *);
4545 
4546 		aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4547 		/*
4548 		 * Poll the adapter ourselves in case interrupts are disabled,
4549 		 * and to avoid releasing the io_lock.
4550 		 */
4551 		intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
4552 		    aac_process_intr_new : aac_process_intr_old;
4553 		while ((softs->bus_ncmds[AAC_CMDQ_SYNC] ||
4554 		    softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) {
4555 			drv_usecwait(100);
4556 			(void) intr_handler(softs);
4557 			timeout--;
4558 		}
4559 		aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4560 
4561 		if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 &&
4562 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) {
4563 			/* Cmds drained out */
4564 			rval = AAC_IOP_RESET_SUCCEED;
4565 			goto finish;
4566 		} else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds ||
4567 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) {
4568 			/* Cmds not drained out, adapter overloaded */
4569 			rval = AAC_IOP_RESET_ABNORMAL;
4570 			goto finish;
4571 		}
4572 	}
4573 
4574 	/*
4575 	 * If even a longer wait still cannot drain the outstanding io
4576 	 * commands, do an IOP reset.
4577 	 */
4578 	if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED)
4579 		softs->state |= AAC_STATE_DEAD;
4580 
4581 finish:
4582 	softs->state &= ~AAC_STATE_RESET;
4583 	return (rval);
4584 }
4585 
4586 static int
4587 aac_tran_reset(struct scsi_address *ap, int level)
4588 {
4589 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4590 	int rval;
4591 
4592 	DBCALLED(softs, 1);
4593 
4594 	if (level != RESET_ALL) {
4595 		cmn_err(CE_NOTE, "!reset target/lun not supported");
4596 		return (0);
4597 	}
4598 
4599 	mutex_enter(&softs->io_lock);
4600 	switch (rval = aac_do_reset(softs)) {
4601 	case AAC_IOP_RESET_SUCCEED:
4602 		aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC,
4603 		    NULL, CMD_RESET);
4604 		aac_start_waiting_io(softs);
4605 		break;
4606 	case AAC_IOP_RESET_FAILED:
4607 		/* Abort IOCTL cmds when adapter is dead */
4608 		aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET);
4609 		break;
4610 	case AAC_IOP_RESET_ABNORMAL:
4611 		aac_start_waiting_io(softs);
4612 	}
4613 	mutex_exit(&softs->io_lock);
4614 
4615 	aac_drain_comp_q(softs);
4616 	return (rval == 0);
4617 }
4618 
4619 static int
4620 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4621 {
4622 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4623 
4624 	DBCALLED(softs, 1);
4625 
4626 	mutex_enter(&softs->io_lock);
4627 	aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED);
4628 	mutex_exit(&softs->io_lock);
4629 
4630 	aac_drain_comp_q(softs);
4631 	return (1);
4632 }
4633 
4634 void
4635 aac_free_dmamap(struct aac_cmd *acp)
4636 {
4637 	/* Free dma mapping */
4638 	if (acp->flags & AAC_CMD_DMA_VALID) {
4639 		ASSERT(acp->buf_dma_handle);
4640 		(void) ddi_dma_unbind_handle(acp->buf_dma_handle);
4641 		acp->flags &= ~AAC_CMD_DMA_VALID;
4642 	}
4643 
4644 	if (acp->abp != NULL) { /* free non-aligned buf DMA */
4645 		ASSERT(acp->buf_dma_handle);
4646 		if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp)
4647 			ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr,
4648 			    (uint8_t *)acp->abp, acp->bp->b_bcount,
4649 			    DDI_DEV_AUTOINCR);
4650 		ddi_dma_mem_free(&acp->abh);
4651 		acp->abp = NULL;
4652 	}
4653 
4654 	if (acp->buf_dma_handle) {
4655 		ddi_dma_free_handle(&acp->buf_dma_handle);
4656 		acp->buf_dma_handle = NULL;
4657 	}
4658 }
4659 
4660 static void
4661 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
4662 {
4663 	AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported",
4664 	    ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd);
4665 	aac_free_dmamap(acp);
4666 	aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0);
4667 	aac_soft_callback(softs, acp);
4668 }
4669 
4670 /*
4671  * Handle command to logical device
4672  */
4673 static int
4674 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp)
4675 {
4676 	struct aac_container *dvp;
4677 	struct scsi_pkt *pkt;
4678 	union scsi_cdb *cdbp;
4679 	struct buf *bp;
4680 	int rval;
4681 
4682 	dvp = (struct aac_container *)acp->dvp;
4683 	pkt = acp->pkt;
4684 	cdbp = (void *)pkt->pkt_cdbp;
4685 	bp = acp->bp;
4686 
4687 	switch (cdbp->scc_cmd) {
4688 	case SCMD_INQUIRY: /* inquiry */
4689 		aac_free_dmamap(acp);
4690 		aac_inquiry(softs, pkt, cdbp, bp);
4691 		aac_soft_callback(softs, acp);
4692 		rval = TRAN_ACCEPT;
4693 		break;
4694 
4695 	case SCMD_READ_CAPACITY: /* read capacity */
4696 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
4697 			struct scsi_capacity cap;
4698 			uint64_t last_lba;
4699 
4700 			/* check 64-bit LBA */
4701 			last_lba = dvp->size - 1;
4702 			if (last_lba > 0xffffffffull) {
4703 				cap.capacity = 0xfffffffful;
4704 			} else {
4705 				cap.capacity = BE_32(last_lba);
4706 			}
4707 			cap.lbasize = BE_32(AAC_SECTOR_SIZE);
4708 
4709 			aac_free_dmamap(acp);
4710 			if (bp->b_flags & (B_PHYS|B_PAGEIO))
4711 				bp_mapin(bp);
4712 			bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8));
4713 			pkt->pkt_state |= STATE_XFERRED_DATA;
4714 		}
4715 		aac_soft_callback(softs, acp);
4716 		rval = TRAN_ACCEPT;
4717 		break;
4718 
4719 	case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */
4720 		/* Check if containers need 64-bit LBA support */
4721 		if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) {
4722 			if (bp && bp->b_un.b_addr && bp->b_bcount) {
4723 				struct scsi_capacity_16 cap16;
4724 				int cap_len = sizeof (struct scsi_capacity_16);
4725 
4726 				bzero(&cap16, cap_len);
4727 				cap16.sc_capacity = BE_64(dvp->size - 1);
4728 				cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE);
4729 
4730 				aac_free_dmamap(acp);
4731 				if (bp->b_flags & (B_PHYS | B_PAGEIO))
4732 					bp_mapin(bp);
4733 				bcopy(&cap16, bp->b_un.b_addr,
4734 				    min(bp->b_bcount, cap_len));
4735 				pkt->pkt_state |= STATE_XFERRED_DATA;
4736 			}
4737 			aac_soft_callback(softs, acp);
4738 		} else {
4739 			aac_unknown_scmd(softs, acp);
4740 		}
4741 		rval = TRAN_ACCEPT;
4742 		break;
4743 
4744 	case SCMD_READ_G4: /* read_16 */
4745 	case SCMD_WRITE_G4: /* write_16 */
4746 		if (softs->flags & AAC_FLAGS_RAW_IO) {
4747 			/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
4748 			acp->blkno = ((uint64_t) \
4749 			    GETG4ADDR(cdbp) << 32) | \
4750 			    (uint32_t)GETG4ADDRTL(cdbp);
4751 			goto do_io;
4752 		}
4753 		AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported");
4754 		aac_unknown_scmd(softs, acp);
4755 		rval = TRAN_ACCEPT;
4756 		break;
4757 
4758 	case SCMD_READ: /* read_6 */
4759 	case SCMD_WRITE: /* write_6 */
4760 		acp->blkno = GETG0ADDR(cdbp);
4761 		goto do_io;
4762 
4763 	case SCMD_READ_G5: /* read_12 */
4764 	case SCMD_WRITE_G5: /* write_12 */
4765 		acp->blkno = GETG5ADDR(cdbp);
4766 		goto do_io;
4767 
4768 	case SCMD_READ_G1: /* read_10 */
4769 	case SCMD_WRITE_G1: /* write_10 */
4770 		acp->blkno = (uint32_t)GETG1ADDR(cdbp);
4771 do_io:
4772 		if (acp->flags & AAC_CMD_DMA_VALID) {
4773 			uint64_t cnt_size = dvp->size;
4774 
4775 			/*
4776 			 * If LBA > array size AND rawio, the
4777 			 * adapter may hang. So check it before
4778 			 * sending.
4779 			 * NOTE: (blkno + blkcnt) may overflow
4780 			 */
4781 			if ((acp->blkno < cnt_size) &&
4782 			    ((acp->blkno + acp->bcount /
4783 			    AAC_BLK_SIZE) <= cnt_size)) {
4784 				rval = aac_do_io(softs, acp);
4785 			} else {
4786 				/*
4787 				 * Request exceeds the capacity of the disk;
4788 				 * set the error block number to the last
4789 				 * LBA + 1.
4790 				 */
4791 				aac_set_arq_data(pkt,
4792 				    KEY_ILLEGAL_REQUEST, 0x21,
4793 				    0x00, cnt_size);
4794 				aac_soft_callback(softs, acp);
4795 				rval = TRAN_ACCEPT;
4796 			}
4797 		} else if (acp->bcount == 0) {
4798 			/* For 0 length IO, just return ok */
4799 			aac_soft_callback(softs, acp);
4800 			rval = TRAN_ACCEPT;
4801 		} else {
4802 			rval = TRAN_BADPKT;
4803 		}
4804 		break;
4805 
4806 	case SCMD_MODE_SENSE: /* mode_sense_6 */
4807 	case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */
4808 		int capacity;
4809 
4810 		aac_free_dmamap(acp);
4811 		if (dvp->size > 0xffffffffull)
4812 			capacity = 0xfffffffful; /* 64-bit LBA */
4813 		else
4814 			capacity = dvp->size;
4815 		aac_mode_sense(softs, pkt, cdbp, bp, capacity);
4816 		aac_soft_callback(softs, acp);
4817 		rval = TRAN_ACCEPT;
4818 		break;
4819 	}
4820 
4821 	case SCMD_START_STOP:
4822 		if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
4823 			acp->aac_cmd_fib = aac_cmd_fib_startstop;
4824 			acp->ac_comp = aac_startstop_complete;
4825 			rval = aac_do_io(softs, acp);
4826 			break;
4827 		}
4828 	/* FALLTHRU */
4829 	case SCMD_TEST_UNIT_READY:
4830 	case SCMD_REQUEST_SENSE:
4831 	case SCMD_FORMAT:
4832 		aac_free_dmamap(acp);
4833 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
4834 			if (acp->flags & AAC_CMD_BUF_READ) {
4835 				if (bp->b_flags & (B_PHYS|B_PAGEIO))
4836 					bp_mapin(bp);
4837 				bzero(bp->b_un.b_addr, bp->b_bcount);
4838 			}
4839 			pkt->pkt_state |= STATE_XFERRED_DATA;
4840 		}
4841 		aac_soft_callback(softs, acp);
4842 		rval = TRAN_ACCEPT;
4843 		break;
4844 
4845 	case SCMD_SYNCHRONIZE_CACHE:
4846 		acp->flags |= AAC_CMD_NTAG;
4847 		acp->aac_cmd_fib = aac_cmd_fib_sync;
4848 		acp->ac_comp = aac_synccache_complete;
4849 		rval = aac_do_io(softs, acp);
4850 		break;
4851 
4852 	case SCMD_DOORLOCK:
4853 		aac_free_dmamap(acp);
4854 		dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0;
4855 		aac_soft_callback(softs, acp);
4856 		rval = TRAN_ACCEPT;
4857 		break;
4858 
4859 	default: /* unknown command */
4860 		aac_unknown_scmd(softs, acp);
4861 		rval = TRAN_ACCEPT;
4862 		break;
4863 	}
4864 
4865 	return (rval);
4866 }
4867 
4868 static int
4869 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
4870 {
4871 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4872 	struct aac_cmd *acp = PKT2AC(pkt);
4873 	struct aac_device *dvp = acp->dvp;
4874 	int rval;
4875 
4876 	DBCALLED(softs, 2);
4877 
4878 	/*
4879 	 * Reinitialize some fields of ac and pkt; the packet may
4880 	 * have been resubmitted
4881 	 */
4882 	acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \
4883 	    AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID;
4884 	acp->timeout = acp->pkt->pkt_time;
4885 	if (pkt->pkt_flags & FLAG_NOINTR)
4886 		acp->flags |= AAC_CMD_NO_INTR;
4887 #ifdef DEBUG
4888 	acp->fib_flags = AACDB_FLAGS_FIB_SCMD;
4889 #endif
4890 	pkt->pkt_reason = CMD_CMPLT;
4891 	pkt->pkt_state = 0;
4892 	pkt->pkt_statistics = 0;
4893 	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
4894 
4895 	if (acp->flags & AAC_CMD_DMA_VALID) {
4896 		pkt->pkt_resid = acp->bcount;
4897 		/* Consistent packets need to be sync'ed first */
4898 		if ((acp->flags & AAC_CMD_CONSISTENT) &&
4899 		    (acp->flags & AAC_CMD_BUF_WRITE))
4900 			if (aac_dma_sync_ac(acp) != AACOK) {
4901 				ddi_fm_service_impact(softs->devinfo_p,
4902 				    DDI_SERVICE_UNAFFECTED);
4903 				return (TRAN_BADPKT);
4904 			}
4905 	} else {
4906 		pkt->pkt_resid = 0;
4907 	}
4908 
4909 	mutex_enter(&softs->io_lock);
4910 	AACDB_PRINT_SCMD(softs, acp);
4911 	if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) &&
4912 	    !(softs->state & AAC_STATE_DEAD)) {
4913 		if (dvp->type == AAC_DEV_LD) {
4914 			if (ap->a_lun == 0)
4915 				rval = aac_tran_start_ld(softs, acp);
4916 			else
4917 				goto error;
4918 		} else {
4919 			rval = aac_do_io(softs, acp);
4920 		}
4921 	} else {
4922 error:
4923 #ifdef DEBUG
4924 		if (!(softs->state & AAC_STATE_DEAD)) {
4925 			AACDB_PRINT_TRAN(softs,
4926 			    "Cannot send cmd to target t%dL%d: %s",
4927 			    ap->a_target, ap->a_lun,
4928 			    "target invalid");
4929 		} else {
4930 			AACDB_PRINT(softs, CE_WARN,
4931 			    "Cannot send cmd to target t%dL%d: %s",
4932 			    ap->a_target, ap->a_lun,
4933 			    "adapter dead");
4934 		}
4935 #endif
4936 		rval = TRAN_FATAL_ERROR;
4937 	}
4938 	mutex_exit(&softs->io_lock);
4939 	return (rval);
4940 }
4941 
4942 static int
4943 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom)
4944 {
4945 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4946 	struct aac_device *dvp;
4947 	int rval;
4948 
4949 	DBCALLED(softs, 2);
4950 
4951 	/* We don't allow inquiring about capabilities for other targets */
4952 	if (cap == NULL || whom == 0) {
4953 		AACDB_PRINT(softs, CE_WARN,
4954 		    "GetCap> %s not supported: whom=%d", cap, whom);
4955 		return (-1);
4956 	}
4957 
4958 	mutex_enter(&softs->io_lock);
4959 	dvp = AAC_DEV(softs, ap->a_target);
4960 	if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
4961 		mutex_exit(&softs->io_lock);
4962 		AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap",
4963 		    ap->a_target, ap->a_lun);
4964 		return (-1);
4965 	}
4966 
4967 	switch (scsi_hba_lookup_capstr(cap)) {
4968 	case SCSI_CAP_ARQ: /* auto request sense */
4969 		rval = 1;
4970 		break;
4971 	case SCSI_CAP_UNTAGGED_QING:
4972 	case SCSI_CAP_TAGGED_QING:
4973 		rval = 1;
4974 		break;
4975 	case SCSI_CAP_DMA_MAX:
4976 		rval = softs->dma_max;
4977 		break;
4978 	default:
4979 		rval = -1;
4980 		break;
4981 	}
4982 	mutex_exit(&softs->io_lock);
4983 
4984 	AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d",
4985 	    cap, ap->a_target, ap->a_lun, rval);
4986 	return (rval);
4987 }
4988 
4989 /*ARGSUSED*/
4990 static int
4991 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4992 {
4993 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4994 	struct aac_device *dvp;
4995 	int rval;
4996 
4997 	DBCALLED(softs, 2);
4998 
4999 	/* We don't allow setting capabilities for other targets */
5000 	if (cap == NULL || whom == 0) {
5001 		AACDB_PRINT(softs, CE_WARN,
5002 		    "SetCap> %s not supported: whom=%d", cap, whom);
5003 		return (-1);
5004 	}
5005 
5006 	mutex_enter(&softs->io_lock);
5007 	dvp = AAC_DEV(softs, ap->a_target);
5008 	if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
5009 		mutex_exit(&softs->io_lock);
5010 		AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap",
5011 		    ap->a_target, ap->a_lun);
5012 		return (-1);
5013 	}
5014 
5015 	switch (scsi_hba_lookup_capstr(cap)) {
5016 	case SCSI_CAP_ARQ:
5017 		/* Force auto request sense */
5018 		rval = (value == 1) ? 1 : 0;
5019 		break;
5020 	case SCSI_CAP_UNTAGGED_QING:
5021 	case SCSI_CAP_TAGGED_QING:
5022 		rval = (value == 1) ? 1 : 0;
5023 		break;
5024 	default:
5025 		rval = -1;
5026 		break;
5027 	}
5028 	mutex_exit(&softs->io_lock);
5029 
5030 	AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d",
5031 	    cap, ap->a_target, ap->a_lun, value, rval);
5032 	return (rval);
5033 }
5034 
5035 static void
5036 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5037 {
5038 	struct aac_cmd *acp = PKT2AC(pkt);
5039 
5040 	DBCALLED(NULL, 2);
5041 
5042 	if (acp->sgt) {
5043 		kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5044 		    acp->left_cookien);
5045 	}
5046 	aac_free_dmamap(acp);
5047 	ASSERT(acp->slotp == NULL);
5048 	scsi_hba_pkt_free(ap, pkt);
5049 }
5050 
5051 int
5052 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp,
5053     struct buf *bp, int flags, int (*cb)(), caddr_t arg)
5054 {
5055 	int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
5056 	uint_t oldcookiec;
5057 	int bioerr;
5058 	int rval;
5059 
5060 	oldcookiec = acp->left_cookien;
5061 
5062 	/* Move window to build s/g map */
5063 	if (acp->total_nwin > 0) {
5064 		if (++acp->cur_win < acp->total_nwin) {
5065 			off_t off;
5066 			size_t len;
5067 
5068 			rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win,
5069 			    &off, &len, &acp->cookie, &acp->left_cookien);
5070 			if (rval == DDI_SUCCESS)
5071 				goto get_dma_cookies;
5072 			AACDB_PRINT(softs, CE_WARN,
5073 			    "ddi_dma_getwin() fail %d", rval);
5074 			return (AACERR);
5075 		}
5076 		AACDB_PRINT(softs, CE_WARN, "Nothing to transfer");
5077 		return (AACERR);
5078 	}
5079 
5080 	/* We need to transfer data, so we alloc DMA resources for this pkt */
5081 	if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) {
5082 		uint_t dma_flags = 0;
5083 		struct aac_sge *sge;
5084 
5085 		/*
5086 		 * We will still use this pointer to fake some
5087 		 * information in tran_start
5088 		 */
5089 		acp->bp = bp;
5090 
5091 		/* Set dma flags */
5092 		if (BUF_IS_READ(bp)) {
5093 			dma_flags |= DDI_DMA_READ;
5094 			acp->flags |= AAC_CMD_BUF_READ;
5095 		} else {
5096 			dma_flags |= DDI_DMA_WRITE;
5097 			acp->flags |= AAC_CMD_BUF_WRITE;
5098 		}
5099 		if (flags & PKT_CONSISTENT)
5100 			dma_flags |= DDI_DMA_CONSISTENT;
5101 		if (flags & PKT_DMA_PARTIAL)
5102 			dma_flags |= DDI_DMA_PARTIAL;
5103 
5104 		/* Alloc buf dma handle */
5105 		if (!acp->buf_dma_handle) {
5106 			rval = ddi_dma_alloc_handle(softs->devinfo_p,
5107 			    &softs->buf_dma_attr, cb, arg,
5108 			    &acp->buf_dma_handle);
5109 			if (rval != DDI_SUCCESS) {
5110 				AACDB_PRINT(softs, CE_WARN,
5111 				    "Can't allocate DMA handle, errno=%d",
5112 				    rval);
5113 				goto error_out;
5114 			}
5115 		}
5116 
5117 		/* Bind buf */
5118 		if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) {
5119 			rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle,
5120 			    bp, dma_flags, cb, arg, &acp->cookie,
5121 			    &acp->left_cookien);
5122 		} else {
5123 			size_t bufsz;
5124 
5125 			AACDB_PRINT_TRAN(softs,
5126 			    "non-aligned buffer: addr=0x%p, cnt=%lu",
5127 			    (void *)bp->b_un.b_addr, bp->b_bcount);
5128 			if (bp->b_flags & (B_PAGEIO|B_PHYS))
5129 				bp_mapin(bp);
5130 
5131 			rval = ddi_dma_mem_alloc(acp->buf_dma_handle,
5132 			    AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN),
5133 			    &softs->acc_attr, DDI_DMA_STREAMING,
5134 			    cb, arg, &acp->abp, &bufsz, &acp->abh);
5135 
5136 			if (rval != DDI_SUCCESS) {
5137 				AACDB_PRINT(softs, CE_NOTE,
5138 				    "Cannot alloc DMA to non-aligned buf");
5139 				bioerr = 0;
5140 				goto error_out;
5141 			}
5142 
5143 			if (acp->flags & AAC_CMD_BUF_WRITE)
5144 				ddi_rep_put8(acp->abh,
5145 				    (uint8_t *)bp->b_un.b_addr,
5146 				    (uint8_t *)acp->abp, bp->b_bcount,
5147 				    DDI_DEV_AUTOINCR);
5148 
5149 			rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle,
5150 			    NULL, acp->abp, bufsz, dma_flags, cb, arg,
5151 			    &acp->cookie, &acp->left_cookien);
5152 		}
5153 
5154 		switch (rval) {
5155 		case DDI_DMA_PARTIAL_MAP:
5156 			if (ddi_dma_numwin(acp->buf_dma_handle,
5157 			    &acp->total_nwin) == DDI_FAILURE) {
5158 				AACDB_PRINT(softs, CE_WARN,
5159 				    "Cannot get number of DMA windows");
5160 				bioerr = 0;
5161 				goto error_out;
5162 			}
5163 			AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
5164 			    acp->left_cookien);
5165 			acp->cur_win = 0;
5166 			break;
5167 
5168 		case DDI_DMA_MAPPED:
5169 			AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
5170 			    acp->left_cookien);
5171 			acp->cur_win = 0;
5172 			acp->total_nwin = 1;
5173 			break;
5174 
5175 		case DDI_DMA_NORESOURCES:
5176 			bioerr = 0;
5177 			AACDB_PRINT(softs, CE_WARN,
5178 			    "Cannot bind buf for DMA: DDI_DMA_NORESOURCES");
5179 			goto error_out;
5180 		case DDI_DMA_BADATTR:
5181 		case DDI_DMA_NOMAPPING:
5182 			bioerr = EFAULT;
5183 			AACDB_PRINT(softs, CE_WARN,
5184 			    "Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
5185 			goto error_out;
5186 		case DDI_DMA_TOOBIG:
5187 			bioerr = EINVAL;
5188 			AACDB_PRINT(softs, CE_WARN,
5189 			    "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
5190 			    bp->b_bcount);
5191 			goto error_out;
5192 		default:
5193 			bioerr = EINVAL;
5194 			AACDB_PRINT(softs, CE_WARN,
5195 			    "Cannot bind buf for DMA: %d", rval);
5196 			goto error_out;
5197 		}
5198 		acp->flags |= AAC_CMD_DMA_VALID;
5199 
5200 get_dma_cookies:
5201 		ASSERT(acp->left_cookien > 0);
5202 		if (acp->left_cookien > softs->aac_sg_tablesize) {
5203 			AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d",
5204 			    acp->left_cookien);
5205 			bioerr = EINVAL;
5206 			goto error_out;
5207 		}
5208 		if (oldcookiec != acp->left_cookien && acp->sgt != NULL) {
5209 			kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5210 			    oldcookiec);
5211 			acp->sgt = NULL;
5212 		}
5213 		if (acp->sgt == NULL) {
5214 			acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \
5215 			    acp->left_cookien, kf);
5216 			if (acp->sgt == NULL) {
5217 				AACDB_PRINT(softs, CE_WARN,
5218 				    "sgt kmem_alloc fail");
5219 				bioerr = ENOMEM;
5220 				goto error_out;
5221 			}
5222 		}
5223 
5224 		sge = &acp->sgt[0];
5225 		sge->bcount = acp->cookie.dmac_size;
5226 		sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5227 		sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5228 		acp->bcount = acp->cookie.dmac_size;
5229 		for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) {
5230 			ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie);
5231 			sge->bcount = acp->cookie.dmac_size;
5232 			sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5233 			sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5234 			acp->bcount += acp->cookie.dmac_size;
5235 		}
5236 
5237 		/*
5238 		 * Note: The old DMA engine does not correctly handle the
5239 		 * dma_attr_maxxfer attribute, so we have to enforce
5240 		 * it ourselves.
5241 		 */
5242 		if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) {
5243 			AACDB_PRINT(softs, CE_NOTE,
5244 			    "large xfer size received %d\n", acp->bcount);
5245 			bioerr = EINVAL;
5246 			goto error_out;
5247 		}
5248 
5249 		acp->total_xfer += acp->bcount;
5250 
5251 		if (acp->pkt) {
5252 			/* Return remaining byte count */
5253 			if (acp->total_xfer <= bp->b_bcount) {
5254 				acp->pkt->pkt_resid = bp->b_bcount - \
5255 				    acp->total_xfer;
5256 			} else {
5257 				/*
5258 				 * Allocated DMA size is greater than the buf
5259 				 * size of bp. This is caused by devices like
5260 				 * tape. We have extra bytes allocated, but
5261 				 * the packet residual has to stay correct.
5262 				 */
5263 				acp->pkt->pkt_resid = 0;
5264 			}
5265 			AACDB_PRINT_TRAN(softs,
5266 			    "bp=0x%p, xfered=%d/%d, resid=%d",
5267 			    (void *)bp->b_un.b_addr, (int)acp->total_xfer,
5268 			    (int)bp->b_bcount, (int)acp->pkt->pkt_resid);
5269 		}
5270 	}
5271 	return (AACOK);
5272 
5273 error_out:
5274 	bioerror(bp, bioerr);
5275 	return (AACERR);
5276 }
5277 
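/*
 * tran_init_pkt(9E) - allocate a scsi_pkt and DMA resources for a command
 */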
5278 static struct scsi_pkt *
5279 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
5280     struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
5281     int (*callback)(), caddr_t arg)
5282 {
5283 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5284 	struct aac_cmd *acp, *new_acp;
5285 
5286 	DBCALLED(softs, 2);
5287 
5288 	/* Allocate pkt */
5289 	if (pkt == NULL) {
5290 		int slen;
5291 
5292 		/* Force auto request sense */
5293 		slen = (statuslen > softs->slen) ? statuslen : softs->slen;
5294 		pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen,
5295 		    slen, tgtlen, sizeof (struct aac_cmd), callback, arg);
5296 		if (pkt == NULL) {
5297 			AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed");
5298 			return (NULL);
5299 		}
5300 		acp = new_acp = PKT2AC(pkt);
5301 		acp->pkt = pkt;
5302 		acp->cmdlen = cmdlen;
5303 
5304 		if (ap->a_target < AAC_MAX_LD) {
5305 			acp->dvp = &softs->containers[ap->a_target].dev;
5306 			acp->aac_cmd_fib = softs->aac_cmd_fib;
5307 			acp->ac_comp = aac_ld_complete;
5308 		} else {
5309 			_NOTE(ASSUMING_PROTECTED(softs->nondasds))
5310 
5311 			acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev;
5312 			acp->aac_cmd_fib = softs->aac_cmd_fib_scsi;
5313 			acp->ac_comp = aac_pd_complete;
5314 		}
5315 	} else {
5316 		acp = PKT2AC(pkt);
5317 		new_acp = NULL;
5318 	}
5319 
5320 	if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK)
5321 		return (pkt);
5322 
5323 	if (new_acp)
5324 		aac_tran_destroy_pkt(ap, pkt);
5325 	return (NULL);
5326 }
5327 
5328 /*
5329  * tran_sync_pkt(9E) - explicit DMA synchronization
5330  */
5331 /*ARGSUSED*/
5332 static void
5333 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5334 {
5335 	struct aac_cmd *acp = PKT2AC(pkt);
5336 
5337 	DBCALLED(NULL, 2);
5338 
5339 	if (aac_dma_sync_ac(acp) != AACOK)
5340 		ddi_fm_service_impact(
5341 		    (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p,
5342 		    DDI_SERVICE_UNAFFECTED);
5343 }
5344 
5345 /*
5346  * tran_dmafree(9E) - deallocate DMA resources allocated for command
5347  */
5348 /*ARGSUSED*/
5349 static void
5350 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
5351 {
5352 	struct aac_cmd *acp = PKT2AC(pkt);
5353 
5354 	DBCALLED(NULL, 2);
5355 
5356 	aac_free_dmamap(acp);
5357 }
5358 
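/*
 * Hold the bus and wait for all outstanding async commands to drain
 */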
5359 static int
5360 aac_do_quiesce(struct aac_softstate *softs)
5361 {
5362 	aac_hold_bus(softs, AAC_IOCMD_ASYNC);
5363 	if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) {
5364 		aac_start_drain(softs);
5365 		do {
5366 			if (cv_wait_sig(&softs->drain_cv,
5367 			    &softs->io_lock) == 0) {
5368 				/* Quiesce has been interrupted */
5369 				aac_stop_drain(softs);
5370 				aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5371 				aac_start_waiting_io(softs);
5372 				return (AACERR);
5373 			}
5374 		} while (softs->bus_ncmds[AAC_CMDQ_ASYNC]);
5375 		aac_stop_drain(softs);
5376 	}
5377 
5378 	softs->state |= AAC_STATE_QUIESCED;
5379 	return (AACOK);
5380 }
5381 
5382 static int
5383 aac_tran_quiesce(dev_info_t *dip)
5384 {
5385 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5386 	int rval;
5387 
5388 	DBCALLED(softs, 1);
5389 
5390 	mutex_enter(&softs->io_lock);
5391 	if (aac_do_quiesce(softs) == AACOK)
5392 		rval = 0;
5393 	else
5394 		rval = 1;
5395 	mutex_exit(&softs->io_lock);
5396 	return (rval);
5397 }
5398 
5399 static int
5400 aac_do_unquiesce(struct aac_softstate *softs)
5401 {
5402 	softs->state &= ~AAC_STATE_QUIESCED;
5403 	aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5404 
5405 	aac_start_waiting_io(softs);
5406 	return (AACOK);
5407 }
5408 
5409 static int
5410 aac_tran_unquiesce(dev_info_t *dip)
5411 {
5412 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5413 	int rval;
5414 
5415 	DBCALLED(softs, 1);
5416 
5417 	mutex_enter(&softs->io_lock);
5418 	if (aac_do_unquiesce(softs) == AACOK)
5419 		rval = 0;
5420 	else
5421 		rval = 1;
5422 	mutex_exit(&softs->io_lock);
5423 	return (rval);
5424 }
5425 
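/*
 * Allocate the SCSA tran structure, fill in the entry points and attach
 * to the SCSA framework
 */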
5426 static int
5427 aac_hba_setup(struct aac_softstate *softs)
5428 {
5429 	scsi_hba_tran_t *hba_tran;
5430 	int rval;
5431 
5432 	hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP);
5433 	if (hba_tran == NULL)
5434 		return (AACERR);
5435 	hba_tran->tran_hba_private = softs;
5436 	hba_tran->tran_tgt_init = aac_tran_tgt_init;
5437 	hba_tran->tran_tgt_free = aac_tran_tgt_free;
5438 	hba_tran->tran_tgt_probe = scsi_hba_probe;
5439 	hba_tran->tran_start = aac_tran_start;
5440 	hba_tran->tran_getcap = aac_tran_getcap;
5441 	hba_tran->tran_setcap = aac_tran_setcap;
5442 	hba_tran->tran_init_pkt = aac_tran_init_pkt;
5443 	hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt;
5444 	hba_tran->tran_reset = aac_tran_reset;
5445 	hba_tran->tran_abort = aac_tran_abort;
5446 	hba_tran->tran_sync_pkt = aac_tran_sync_pkt;
5447 	hba_tran->tran_dmafree = aac_tran_dmafree;
5448 	hba_tran->tran_quiesce = aac_tran_quiesce;
5449 	hba_tran->tran_unquiesce = aac_tran_unquiesce;
5450 	hba_tran->tran_bus_config = aac_tran_bus_config;
5451 	rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr,
5452 	    hba_tran, 0);
5453 	if (rval != DDI_SUCCESS) {
5454 		scsi_hba_tran_free(hba_tran);
5455 		AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed");
5456 		return (AACERR);
5457 	}
5458 
5459 	softs->hba_tran = hba_tran;
5460 	return (AACOK);
5461 }
5462 
5463 /*
5464  * FIB setup operations
5465  */
5466 
5467 /*
5468  * Init FIB header
5469  */
5470 static void
5471 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_cmd *acp,
5472     uint16_t cmd)
5473 {
5474 	struct aac_slot *slotp = acp->slotp;
5475 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5476 	struct aac_fib *fibp = slotp->fibp;
5477 	uint32_t xfer_state;
5478 
5479 	xfer_state =
5480 	    AAC_FIBSTATE_HOSTOWNED |
5481 	    AAC_FIBSTATE_INITIALISED |
5482 	    AAC_FIBSTATE_EMPTY |
5483 	    AAC_FIBSTATE_FROMHOST |
5484 	    AAC_FIBSTATE_REXPECTED |
5485 	    AAC_FIBSTATE_NORM;
5486 
5487 	if (!(acp->flags & AAC_CMD_SYNC)) {
5488 		xfer_state |=
5489 		    AAC_FIBSTATE_ASYNC |
5490 		    AAC_FIBSTATE_FAST_RESPONSE; /* enable fast io */
5491 	}
5492 
5493 	ddi_put32(acc, &fibp->Header.XferState, xfer_state);
5494 	ddi_put16(acc, &fibp->Header.Command, cmd);
5495 	ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB);
5496 	ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */
5497 	ddi_put16(acc, &fibp->Header.Size, acp->fib_size);
5498 	ddi_put16(acc, &fibp->Header.SenderSize, softs->aac_max_fib_size);
5499 	ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2));
5500 	ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5501 	ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */
5502 }
5503 
5504 /*
5505  * Init FIB for raw IO command
5506  */
5507 static void
5508 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp)
5509 {
5510 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5511 	struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0];
5512 	struct aac_sg_entryraw *sgp;
5513 	struct aac_sge *sge;
5514 
5515 	/* Calculate FIB size */
5516 	acp->fib_size = sizeof (struct aac_fib_header) + \
5517 	    sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \
5518 	    sizeof (struct aac_sg_entryraw);
5519 
5520 	aac_cmd_fib_header(softs, acp, RawIo);
5521 
5522 	ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0);
5523 	ddi_put16(acc, &io->BpTotal, 0);
5524 	ddi_put16(acc, &io->BpComplete, 0);
5525 
5526 	ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno));
5527 	ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno));
5528 	ddi_put16(acc, &io->ContainerId,
5529 	    ((struct aac_container *)acp->dvp)->cid);
5530 
5531 	/* Fill SG table */
5532 	ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien);
5533 	ddi_put32(acc, &io->ByteCount, acp->bcount);
5534 
5535 	for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0];
5536 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5537 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5538 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5539 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5540 		sgp->Next = 0;
5541 		sgp->Prev = 0;
5542 		sgp->Flags = 0;
5543 	}
5544 }
5545 
5546 /* Init FIB for 64-bit block IO command */
5547 static void
5548 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp)
5549 {
5550 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5551 	struct aac_blockread64 *br = (struct aac_blockread64 *) \
5552 	    &acp->slotp->fibp->data[0];
5553 	struct aac_sg_entry64 *sgp;
5554 	struct aac_sge *sge;
5555 
5556 	acp->fib_size = sizeof (struct aac_fib_header) + \
5557 	    sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \
5558 	    sizeof (struct aac_sg_entry64);
5559 
5560 	aac_cmd_fib_header(softs, acp, ContainerCommand64);
5561 
5562 	/*
5563 	 * The definitions for aac_blockread64 and aac_blockwrite64
5564 	 * are the same.
5565 	 */
5566 	ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5567 	ddi_put16(acc, &br->ContainerId,
5568 	    ((struct aac_container *)acp->dvp)->cid);
5569 	ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ?
5570 	    VM_CtHostRead64 : VM_CtHostWrite64);
5571 	ddi_put16(acc, &br->Pad, 0);
5572 	ddi_put16(acc, &br->Flags, 0);
5573 
5574 	/* Fill SG table */
5575 	ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien);
5576 	ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE);
5577 
5578 	for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0];
5579 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5580 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5581 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5582 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5583 	}
5584 }
5585 
5586 /* Init FIB for block IO command */
5587 static void
5588 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp)
5589 {
5590 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5591 	struct aac_blockread *br = (struct aac_blockread *) \
5592 	    &acp->slotp->fibp->data[0];
5593 	struct aac_sg_entry *sgp;
5594 	struct aac_sge *sge = &acp->sgt[0];
5595 
5596 	if (acp->flags & AAC_CMD_BUF_READ) {
5597 		acp->fib_size = sizeof (struct aac_fib_header) + \
5598 		    sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \
5599 		    sizeof (struct aac_sg_entry);
5600 
5601 		ddi_put32(acc, &br->Command, VM_CtBlockRead);
5602 		ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien);
5603 		sgp = &br->SgMap.SgEntry[0];
5604 	} else {
5605 		struct aac_blockwrite *bw = (struct aac_blockwrite *)br;
5606 
5607 		acp->fib_size = sizeof (struct aac_fib_header) + \
5608 		    sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \
5609 		    sizeof (struct aac_sg_entry);
5610 
5611 		ddi_put32(acc, &bw->Command, VM_CtBlockWrite);
5612 		ddi_put32(acc, &bw->Stable, CUNSTABLE);
5613 		ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien);
5614 		sgp = &bw->SgMap.SgEntry[0];
5615 	}
5616 	aac_cmd_fib_header(softs, acp, ContainerCommand);
5617 
5618 	/*
5619 	 * aac_blockread and aac_blockwrite share the same leading
5620 	 * structure fields, so br is used for bw here
5621 	 */
5622 	ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5623 	ddi_put32(acc, &br->ContainerId,
5624 	    ((struct aac_container *)acp->dvp)->cid);
5625 	ddi_put32(acc, &br->ByteCount, acp->bcount);
5626 
5627 	/* Fill SG table */
5628 	for (sge = &acp->sgt[0];
5629 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5630 		ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5631 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5632 	}
5633 }
5634 
5635 /*ARGSUSED*/
5636 void
5637 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp)
5638 {
5639 	struct aac_slot *slotp = acp->slotp;
5640 	struct aac_fib *fibp = slotp->fibp;
5641 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5642 
5643 	ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp,
5644 	    acp->fib_size,   /* only copy data of needed length */
5645 	    DDI_DEV_AUTOINCR);
5646 	ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5647 	ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2);
5648 }
5649 
5650 static void
5651 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp)
5652 {
5653 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5654 	struct aac_synchronize_command *sync =
5655 	    (struct aac_synchronize_command *)&acp->slotp->fibp->data[0];
5656 
5657 	acp->fib_size = AAC_FIB_SIZEOF(struct aac_synchronize_command);
5658 
5659 	aac_cmd_fib_header(softs, acp, ContainerCommand);
5660 	ddi_put32(acc, &sync->Command, VM_ContainerConfig);
5661 	ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE);
5662 	ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid);
5663 	ddi_put32(acc, &sync->Count,
5664 	    sizeof (((struct aac_synchronize_reply *)0)->Data));
5665 }
5666 
5667 /*
5668  * Start/Stop unit (Power Management)
5669  */
5670 static void
5671 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp)
5672 {
5673 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5674 	struct aac_Container *cmd =
5675 	    (struct aac_Container *)&acp->slotp->fibp->data[0];
5676 	union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp;
5677 
5678 	acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container);
5679 
5680 	aac_cmd_fib_header(softs, acp, ContainerCommand);
5681 	bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
5682 	ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
5683 	ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT);
5684 	ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \
5685 	    AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT);
5686 	ddi_put32(acc, &cmd->CTCommand.param[1],
5687 	    ((struct aac_container *)acp->dvp)->cid);
5688 	ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1);
5689 }
5690 
5691 /*
5692  * Init FIB for pass-through SCMD
5693  */
5694 static void
5695 aac_cmd_fib_srb(struct aac_cmd *acp)
5696 {
5697 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5698 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5699 	uint8_t *cdb;
5700 
5701 	ddi_put32(acc, &srb->function, SRBF_ExecuteScsi);
5702 	ddi_put32(acc, &srb->retry_limit, 0);
5703 	ddi_put32(acc, &srb->cdb_size, acp->cmdlen);
5704 	ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */
5705 	if (acp->fibp == NULL) {
5706 		if (acp->flags & AAC_CMD_BUF_READ)
5707 			ddi_put32(acc, &srb->flags, SRB_DataIn);
5708 		else if (acp->flags & AAC_CMD_BUF_WRITE)
5709 			ddi_put32(acc, &srb->flags, SRB_DataOut);
5710 		ddi_put32(acc, &srb->channel,
5711 		    ((struct aac_nondasd *)acp->dvp)->bus);
5712 		ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid);
5713 		ddi_put32(acc, &srb->lun, 0);
5714 		cdb = acp->pkt->pkt_cdbp;
5715 	} else {
5716 		struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0];
5717 
5718 		ddi_put32(acc, &srb->flags, srb0->flags);
5719 		ddi_put32(acc, &srb->channel, srb0->channel);
5720 		ddi_put32(acc, &srb->id, srb0->id);
5721 		ddi_put32(acc, &srb->lun, srb0->lun);
5722 		cdb = srb0->cdb;
5723 	}
5724 	ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR);
5725 }
5726 
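/* Init FIB for pass-through SCMD with 32-bit SG entries */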
5727 static void
5728 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp)
5729 {
5730 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5731 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5732 	struct aac_sg_entry *sgp;
5733 	struct aac_sge *sge;
5734 
5735 	acp->fib_size = sizeof (struct aac_fib_header) + \
5736 	    sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5737 	    acp->left_cookien * sizeof (struct aac_sg_entry);
5738 
5739 	/* Fill FIB and SRB headers, and copy cdb */
5740 	aac_cmd_fib_header(softs, acp, ScsiPortCommand);
5741 	aac_cmd_fib_srb(acp);
5742 
5743 	/* Fill SG table */
5744 	ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5745 	ddi_put32(acc, &srb->count, acp->bcount);
5746 
5747 	for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0];
5748 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5749 		ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5750 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5751 	}
5752 }
5753 
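/* Init FIB for pass-through SCMD with 64-bit SG entries */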
5754 static void
5755 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp)
5756 {
5757 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5758 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5759 	struct aac_sg_entry64 *sgp;
5760 	struct aac_sge *sge;
5761 
5762 	acp->fib_size = sizeof (struct aac_fib_header) + \
5763 	    sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5764 	    acp->left_cookien * sizeof (struct aac_sg_entry64);
5765 
5766 	/* Fill FIB and SRB headers, and copy cdb */
5767 	aac_cmd_fib_header(softs, acp, ScsiPortCommandU64);
5768 	aac_cmd_fib_srb(acp);
5769 
5770 	/* Fill SG table */
5771 	ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5772 	ddi_put32(acc, &srb->count, acp->bcount);
5773 
5774 	for (sge = &acp->sgt[0],
5775 	    sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0];
5776 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5777 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5778 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5779 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5780 	}
5781 }
5782 
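/*
 * Bind a free slot to the command, build its FIB and sync the FIB memory
 * for the device
 */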
5783 static int
5784 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5785 {
5786 	struct aac_slot *slotp;
5787 
5788 	if (slotp = aac_get_slot(softs)) {
5789 		acp->slotp = slotp;
5790 		slotp->acp = acp;
5791 		acp->aac_cmd_fib(softs, acp);
5792 		(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0,
5793 		    DDI_DMA_SYNC_FORDEV);
5794 		return (AACOK);
5795 	}
5796 	return (AACERR);
5797 }
5798 
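/*
 * Bind a slot to the command if the per-device and per-bus throttles permit
 */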
5799 static int
5800 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp)
5801 {
5802 	struct aac_device *dvp = acp->dvp;
5803 	int q = AAC_CMDQ(acp);
5804 
5805 	if (dvp) {
5806 		if (dvp->ncmds[q] < dvp->throttle[q]) {
5807 			if (!(acp->flags & AAC_CMD_NTAG) ||
5808 			    dvp->ncmds[q] == 0) {
5809 do_bind:
5810 				return (aac_cmd_slot_bind(softs, acp));
5811 			}
5812 			ASSERT(q == AAC_CMDQ_ASYNC);
5813 			aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC,
5814 			    AAC_THROTTLE_DRAIN);
5815 		}
5816 	} else {
5817 		if (softs->bus_ncmds[q] < softs->bus_throttle[q])
5818 			goto do_bind;
5819 	}
5820 	return (AACERR);
5821 }
5822 
5823 static int
5824 aac_sync_fib_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5825 {
5826 	struct aac_slot *slotp;
5827 
5828 	if (slotp = aac_get_slot(softs)) {
5829 		ASSERT(acp->slotp == NULL);
5830 
5831 		acp->slotp = slotp;
5832 		slotp->acp = acp;
5833 		return (AACOK);
5834 	}
5835 	return (AACERR);
5836 }
5837 
5838 static void
5839 aac_sync_fib_slot_release(struct aac_softstate *softs, struct aac_cmd *acp)
5840 {
5841 	ASSERT(acp->slotp);
5842 
5843 	aac_release_slot(softs, acp->slotp);
5844 	acp->slotp->acp = NULL;
5845 	acp->slotp = NULL;
5846 }
5847 
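/*
 * Send a slot-bound command to the adapter and account for it as outstanding
 */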
5848 static void
5849 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp)
5850 {
5851 	struct aac_slot *slotp = acp->slotp;
5852 	int q = AAC_CMDQ(acp);
5853 	int rval;
5854 
5855 	/* Set ac and pkt */
5856 	if (acp->pkt) { /* ac from ioctl has no pkt */
5857 		acp->pkt->pkt_state |=
5858 		    STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
5859 	}
5860 	if (acp->timeout) /* 0 indicates no timeout */
5861 		acp->timeout += aac_timebase + aac_tick;
5862 
5863 	if (acp->dvp)
5864 		acp->dvp->ncmds[q]++;
5865 	softs->bus_ncmds[q]++;
5866 	aac_cmd_enqueue(&softs->q_busy, acp);
5867 
5868 	AACDB_PRINT_FIB(softs, slotp);
5869 
5870 	if (softs->flags & AAC_FLAGS_NEW_COMM) {
5871 		rval = aac_send_command(softs, slotp);
5872 	} else {
5873 		/*
5874 		 * If the fib cannot be enqueued, the adapter is in an
5875 		 * abnormal state and there will be no interrupt to us.
5876 		 */
5877 		rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q,
5878 		    slotp->fib_phyaddr, acp->fib_size);
5879 	}
5880 
5881 	if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS)
5882 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
5883 
5884 	/*
5885 	 * NOTE: We send commands only when slots are available, so we
5886 	 * should never reach here.
5887 	 */
5888 	if (rval != AACOK) {
5889 		AACDB_PRINT(softs, CE_NOTE, "SCMD send failed");
5890 		if (acp->pkt) {
5891 			acp->pkt->pkt_state &= ~STATE_SENT_CMD;
5892 			aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0);
5893 		}
5894 		aac_end_io(softs, acp);
5895 		if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB)))
5896 			ddi_trigger_softintr(softs->softint_id);
5897 	}
5898 }
5899 
5900 static void
5901 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q)
5902 {
5903 	struct aac_cmd *acp, *next_acp;
5904 
5905 	/* Serve as many waiting io's as possible */
5906 	for (acp = q->q_head; acp; acp = next_acp) {
5907 		next_acp = acp->next;
5908 		if (aac_bind_io(softs, acp) == AACOK) {
5909 			aac_cmd_delete(q, acp);
5910 			aac_start_io(softs, acp);
5911 		}
5912 		if (softs->free_io_slot_head == NULL)
5913 			break;
5914 	}
5915 }
5916 
5917 static void
5918 aac_start_waiting_io(struct aac_softstate *softs)
5919 {
5920 	/*
5921 	 * Sync FIB io is served before async FIB io so that io requests
5922 	 * sent by interactive userland commands get responses asap.
5923 	 */
5924 	if (softs->q_wait[AAC_CMDQ_SYNC].q_head)
5925 		aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]);
5926 	if (softs->q_wait[AAC_CMDQ_ASYNC].q_head)
5927 		aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]);
5928 }
5929 
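/*
 * Drain the completion queue and complete each packet back to the SCSA
 * framework
 */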
5930 static void
5931 aac_drain_comp_q(struct aac_softstate *softs)
5932 {
5933 	struct aac_cmd *acp;
5934 	struct scsi_pkt *pkt;
5935 
5936 	/*CONSTCOND*/
5937 	while (1) {
5938 		mutex_enter(&softs->q_comp_mutex);
5939 		acp = aac_cmd_dequeue(&softs->q_comp);
5940 		mutex_exit(&softs->q_comp_mutex);
5941 		if (acp != NULL) {
5942 			ASSERT(acp->pkt != NULL);
5943 			pkt = acp->pkt;
5944 
5945 			if (pkt->pkt_reason == CMD_CMPLT) {
5946 				/*
5947 				 * Consistent packets need to be sync'ed first
5948 				 */
5949 				if ((acp->flags & AAC_CMD_CONSISTENT) &&
5950 				    (acp->flags & AAC_CMD_BUF_READ)) {
5951 					if (aac_dma_sync_ac(acp) != AACOK) {
5952 						ddi_fm_service_impact(
5953 						    softs->devinfo_p,
5954 						    DDI_SERVICE_UNAFFECTED);
5955 						pkt->pkt_reason = CMD_TRAN_ERR;
5956 						pkt->pkt_statistics = 0;
5957 					}
5958 				}
5959 				if ((aac_check_acc_handle(softs-> \
5960 				    comm_space_acc_handle) != DDI_SUCCESS) ||
5961 				    (aac_check_acc_handle(softs-> \
5962 				    pci_mem_handle) != DDI_SUCCESS)) {
5963 					ddi_fm_service_impact(softs->devinfo_p,
5964 					    DDI_SERVICE_UNAFFECTED);
5965 					ddi_fm_acc_err_clear(softs-> \
5966 					    pci_mem_handle, DDI_FME_VER0);
5967 					pkt->pkt_reason = CMD_TRAN_ERR;
5968 					pkt->pkt_statistics = 0;
5969 				}
5970 				if (aac_check_dma_handle(softs-> \
5971 				    comm_space_dma_handle) != DDI_SUCCESS) {
5972 					ddi_fm_service_impact(softs->devinfo_p,
5973 					    DDI_SERVICE_UNAFFECTED);
5974 					pkt->pkt_reason = CMD_TRAN_ERR;
5975 					pkt->pkt_statistics = 0;
5976 				}
5977 			}
5978 			scsi_hba_pkt_comp(pkt);
5979 		} else {
5980 			break;
5981 		}
5982 	}
5983 }
5984 
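/*
 * Allocate and bind DMA memory for one slot's FIB
 */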
5985 static int
5986 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp)
5987 {
5988 	size_t rlen;
5989 	ddi_dma_cookie_t cookie;
5990 	uint_t cookien;
5991 
5992 	/* Allocate FIB dma resource */
5993 	if (ddi_dma_alloc_handle(
5994 	    softs->devinfo_p,
5995 	    &softs->addr_dma_attr,
5996 	    DDI_DMA_SLEEP,
5997 	    NULL,
5998 	    &slotp->fib_dma_handle) != DDI_SUCCESS) {
5999 		AACDB_PRINT(softs, CE_WARN,
6000 		    "Cannot alloc dma handle for slot fib area");
6001 		goto error;
6002 	}
6003 	if (ddi_dma_mem_alloc(
6004 	    slotp->fib_dma_handle,
6005 	    softs->aac_max_fib_size,
6006 	    &softs->acc_attr,
6007 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6008 	    DDI_DMA_SLEEP,
6009 	    NULL,
6010 	    (caddr_t *)&slotp->fibp,
6011 	    &rlen,
6012 	    &slotp->fib_acc_handle) != DDI_SUCCESS) {
6013 		AACDB_PRINT(softs, CE_WARN,
6014 		    "Cannot alloc mem for slot fib area");
6015 		goto error;
6016 	}
6017 	if (ddi_dma_addr_bind_handle(
6018 	    slotp->fib_dma_handle,
6019 	    NULL,
6020 	    (caddr_t)slotp->fibp,
6021 	    softs->aac_max_fib_size,
6022 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6023 	    DDI_DMA_SLEEP,
6024 	    NULL,
6025 	    &cookie,
6026 	    &cookien) != DDI_DMA_MAPPED) {
6027 		AACDB_PRINT(softs, CE_WARN,
6028 		    "dma bind failed for slot fib area");
6029 		goto error;
6030 	}
6031 
6032 	/* Check dma handles allocated in fib attach */
6033 	if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) {
6034 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
6035 		goto error;
6036 	}
6037 
6038 	/* Check acc handles allocated in fib attach */
6039 	if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) {
6040 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
6041 		goto error;
6042 	}
6043 
6044 	slotp->fib_phyaddr = cookie.dmac_laddress;
6045 	return (AACOK);
6046 
6047 error:
6048 	if (slotp->fib_acc_handle) {
6049 		ddi_dma_mem_free(&slotp->fib_acc_handle);
6050 		slotp->fib_acc_handle = NULL;
6051 	}
6052 	if (slotp->fib_dma_handle) {
6053 		ddi_dma_free_handle(&slotp->fib_dma_handle);
6054 		slotp->fib_dma_handle = NULL;
6055 	}
6056 	return (AACERR);
6057 }
6058 
6059 static void
6060 aac_free_fib(struct aac_slot *slotp)
6061 {
6062 	(void) ddi_dma_unbind_handle(slotp->fib_dma_handle);
6063 	ddi_dma_mem_free(&slotp->fib_acc_handle);
6064 	slotp->fib_acc_handle = NULL;
6065 	ddi_dma_free_handle(&slotp->fib_dma_handle);
6066 	slotp->fib_dma_handle = NULL;
6067 	slotp->fib_phyaddr = 0;
6068 }
6069 
6070 static void
6071 aac_alloc_fibs(struct aac_softstate *softs)
6072 {
6073 	int i;
6074 	struct aac_slot *slotp;
6075 
6076 	for (i = 0; i < softs->total_slots &&
6077 	    softs->total_fibs < softs->total_slots; i++) {
6078 		slotp = &(softs->io_slot[i]);
6079 		if (slotp->fib_phyaddr)
6080 			continue;
6081 		if (aac_alloc_fib(softs, slotp) != AACOK)
6082 			break;
6083 
6084 		/* Insert the slot into the free slot list */
6085 		aac_release_slot(softs, slotp);
6086 		softs->total_fibs++;
6087 	}
6088 }
6089 
6090 static void
6091 aac_destroy_fibs(struct aac_softstate *softs)
6092 {
6093 	struct aac_slot *slotp;
6094 
6095 	while ((slotp = softs->free_io_slot_head) != NULL) {
6096 		ASSERT(slotp->fib_phyaddr);
6097 		softs->free_io_slot_head = slotp->next;
6098 		aac_free_fib(slotp);
6099 		ASSERT(slotp->index == (slotp - softs->io_slot));
6100 		softs->total_fibs--;
6101 	}
6102 	ASSERT(softs->total_fibs == 0);
6103 }
6104 
6105 static int
6106 aac_create_slots(struct aac_softstate *softs)
6107 {
6108 	int i;
6109 
6110 	softs->total_slots = softs->aac_max_fibs;
6111 	softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \
6112 	    softs->total_slots, KM_SLEEP);
6113 	if (softs->io_slot == NULL) {
6114 		AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot");
6115 		return (AACERR);
6116 	}
6117 	for (i = 0; i < softs->total_slots; i++)
6118 		softs->io_slot[i].index = i;
6119 	softs->free_io_slot_head = NULL;
6120 	softs->total_fibs = 0;
6121 	return (AACOK);
6122 }
6123 
6124 static void
6125 aac_destroy_slots(struct aac_softstate *softs)
6126 {
6127 	ASSERT(softs->free_io_slot_head == NULL);
6128 
6129 	kmem_free(softs->io_slot, sizeof (struct aac_slot) * \
6130 	    softs->total_slots);
6131 	softs->io_slot = NULL;
6132 	softs->total_slots = 0;
6133 }
6134 
6135 struct aac_slot *
6136 aac_get_slot(struct aac_softstate *softs)
6137 {
6138 	struct aac_slot *slotp;
6139 
6140 	if ((slotp = softs->free_io_slot_head) != NULL) {
6141 		softs->free_io_slot_head = slotp->next;
6142 		slotp->next = NULL;
6143 	}
6144 	return (slotp);
6145 }
6146 
6147 static void
6148 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp)
6149 {
6150 	ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots));
6151 	ASSERT(slotp == &softs->io_slot[slotp->index]);
6152 
6153 	slotp->acp = NULL;
6154 	slotp->next = softs->free_io_slot_head;
6155 	softs->free_io_slot_head = slotp;
6156 }
6157 
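/*
 * Start the command if a slot can be bound, otherwise put it on the wait
 * queue; polled and sync commands are waited for here
 */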
6158 int
6159 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp)
6160 {
6161 	if (aac_bind_io(softs, acp) == AACOK)
6162 		aac_start_io(softs, acp);
6163 	else
6164 		aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp);
6165 
6166 	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR)))
6167 		return (TRAN_ACCEPT);
6168 	/*
6169 	 * Because the sync FIB is always 512 bytes and used for critical
6170 	 * functions, an async FIB is used for poll IO.
6171 	 */
6172 	if (acp->flags & AAC_CMD_NO_INTR) {
6173 		if (aac_do_poll_io(softs, acp) == AACOK)
6174 			return (TRAN_ACCEPT);
6175 	} else {
6176 		if (aac_do_sync_io(softs, acp) == AACOK)
6177 			return (TRAN_ACCEPT);
6178 	}
6179 	return (TRAN_BADPKT);
6180 }
6181 
6182 static int
6183 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp)
6184 {
6185 	int (*intr_handler)(struct aac_softstate *);
6186 
6187 	/*
6188 	 * Interrupts are disabled, so we have to poll the adapter ourselves.
6189 	 */
6190 	intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
6191 	    aac_process_intr_new : aac_process_intr_old;
6192 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) {
6193 		int i = AAC_POLL_TIME * 1000;
6194 
6195 		AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i);
6196 		if (i == 0)
6197 			aac_cmd_timeout(softs, acp);
6198 	}
6199 
6200 	ddi_trigger_softintr(softs->softint_id);
6201 
6202 	if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR))
6203 		return (AACOK);
6204 	return (AACERR);
6205 }
6206 
6207 static int
6208 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp)
6209 {
6210 	ASSERT(softs && acp);
6211 
6212 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
6213 		cv_wait(&softs->event, &softs->io_lock);
6214 
6215 	if (acp->flags & AAC_CMD_CMPLT)
6216 		return (AACOK);
6217 	return (AACERR);
6218 }
6219 
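/*
 * Sync the command data buffer for the device or the CPU, copying through
 * the aligned bounce buffer when one is in use
 */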
6220 static int
6221 aac_dma_sync_ac(struct aac_cmd *acp)
6222 {
6223 	if (acp->buf_dma_handle) {
6224 		if (acp->flags & AAC_CMD_BUF_WRITE) {
6225 			if (acp->abp != NULL)
6226 				ddi_rep_put8(acp->abh,
6227 				    (uint8_t *)acp->bp->b_un.b_addr,
6228 				    (uint8_t *)acp->abp, acp->bp->b_bcount,
6229 				    DDI_DEV_AUTOINCR);
6230 			(void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6231 			    DDI_DMA_SYNC_FORDEV);
6232 		} else {
6233 			(void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6234 			    DDI_DMA_SYNC_FORCPU);
6235 			if (aac_check_dma_handle(acp->buf_dma_handle) !=
6236 			    DDI_SUCCESS)
6237 				return (AACERR);
6238 			if (acp->abp != NULL)
6239 				ddi_rep_get8(acp->abh,
6240 				    (uint8_t *)acp->bp->b_un.b_addr,
6241 				    (uint8_t *)acp->abp, acp->bp->b_bcount,
6242 				    DDI_DEV_AUTOINCR);
6243 		}
6244 	}
6245 	return (AACOK);
6246 }
6247 
6248 /*
6249  * The following function comes from Adaptec:
6250  *
6251  * When the driver sees a particular event that means containers have changed,
6252  * it will rescan containers. However, a change may not be complete until some
6253  * other event is received. For example, creating or deleting an array can
6254  * incur as many as six AifEnConfigChange events, which would generate six
6255  * container rescans. To reduce rescans, the driver sets a flag to wait for
6256  * another particular event, and only when that event comes in does it rescan.
6257  */
6258 static int
6259 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp)
6260 {
6261 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
6262 	uint16_t fib_command;
6263 	struct aac_aif_command *aif;
6264 	int en_type;
6265 	int devcfg_needed;
6266 	int current, next;
6267 
6268 	fib_command = LE_16(fibp->Header.Command);
6269 	if (fib_command != AifRequest) {
6270 		cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x",
6271 		    fib_command);
6272 		return (AACERR);
6273 	}
6274 
6275 	/* Update internal container state */
6276 	aif = (struct aac_aif_command *)&fibp->data[0];
6277 
6278 	AACDB_PRINT_AIF(softs, aif);
6279 	devcfg_needed = 0;
6280 	en_type = LE_32((uint32_t)aif->data.EN.type);
6281 
6282 	switch (LE_32((uint32_t)aif->command)) {
6283 	case AifCmdDriverNotify: {
6284 		int cid = LE_32(aif->data.EN.data.ECC.container[0]);
6285 
6286 		switch (en_type) {
6287 		case AifDenMorphComplete:
6288 		case AifDenVolumeExtendComplete:
6289 			if (AAC_DEV_IS_VALID(&softs->containers[cid].dev))
6290 				softs->devcfg_wait_on = AifEnConfigChange;
6291 			break;
6292 		}
6293 		if (softs->devcfg_wait_on == en_type)
6294 			devcfg_needed = 1;
6295 		break;
6296 	}
6297 
6298 	case AifCmdEventNotify:
6299 		switch (en_type) {
6300 		case AifEnAddContainer:
6301 		case AifEnDeleteContainer:
6302 			softs->devcfg_wait_on = AifEnConfigChange;
6303 			break;
6304 		case AifEnContainerChange:
6305 			if (!softs->devcfg_wait_on)
6306 				softs->devcfg_wait_on = AifEnConfigChange;
6307 			break;
6308 		case AifEnContainerEvent:
6309 			if (ddi_get32(acc, &aif-> \
6310 			    data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE)
6311 				devcfg_needed = 1;
6312 			break;
6313 		}
6314 		if (softs->devcfg_wait_on == en_type)
6315 			devcfg_needed = 1;
6316 		break;
6317 
6318 	case AifCmdJobProgress:
6319 		if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) {
6320 			int pr_status;
6321 			uint32_t pr_ftick, pr_ctick;
6322 
6323 			pr_status = LE_32((uint32_t)aif->data.PR[0].status);
6324 			pr_ctick = LE_32(aif->data.PR[0].currentTick);
6325 			pr_ftick = LE_32(aif->data.PR[0].finalTick);
6326 
6327 			if ((pr_ctick == pr_ftick) ||
6328 			    (pr_status == AifJobStsSuccess))
6329 				softs->devcfg_wait_on = AifEnContainerChange;
6330 			else if ((pr_ctick == 0) &&
6331 			    (pr_status == AifJobStsRunning))
6332 				softs->devcfg_wait_on = AifEnContainerChange;
6333 		}
6334 		break;
6335 	}
6336 
6337 	mutex_exit(&softs->aifq_mutex);
6338 	if (devcfg_needed) {
6339 		softs->devcfg_wait_on = 0;
6340 		(void) aac_probe_containers(softs);
6341 	}
6342 	mutex_enter(&softs->aifq_mutex);
6343 
6344 	/* Modify AIF contexts */
6345 	current = softs->aifq_idx;
6346 	next = (current + 1) % AAC_AIFQ_LENGTH;
6347 	if (next == 0) {
6348 		struct aac_fib_context *ctx;
6349 
6350 		softs->aifq_wrap = 1;
6351 		for (ctx = softs->fibctx; ctx; ctx = ctx->next) {
6352 			if (next == ctx->ctx_idx) {
6353 				ctx->ctx_filled = 1;
6354 			} else if (current == ctx->ctx_idx && ctx->ctx_filled) {
6355 				ctx->ctx_idx = next;
6356 				AACDB_PRINT(softs, CE_NOTE,
6357 				    "-- AIF queue(%x) overrun", ctx->unique);
6358 			}
6359 		}
6360 	}
6361 	softs->aifq_idx = next;
6362 
6363 	/* Wakeup applications */
6364 	cv_broadcast(&softs->aifv);
6365 	return (AACOK);
6366 }
6367 
6368 /*
6369  * Timeout recovery
6370  */
6371 /*ARGSUSED*/
6372 static void
6373 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp)
6374 {
6375 #ifdef DEBUG
6376 	acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT;
6377 	AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp);
6378 	AACDB_PRINT_FIB(softs, acp->slotp);
6379 #endif
6380 
6381 	/*
6382 	 * Besides firmware in an unhealthy state, an overloaded
6383 	 * adapter may also incur pkt timeouts.
6384 	 * There is a chance that an adapter with a slower IOP will take
6385 	 * longer than 60 seconds to process commands, for example when
6386 	 * it is doing a build on a RAID-5 volume while still being
6387 	 * required to perform IOs, so longer completion times should be
6388 	 * tolerated.
6389 	 */
6390 	switch (aac_do_reset(softs)) {
6391 	case AAC_IOP_RESET_SUCCEED:
6392 		aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET);
6393 		aac_start_waiting_io(softs);
6394 		break;
6395 	case AAC_IOP_RESET_FAILED:
6396 		/* Abort all waiting cmds when adapter is dead */
6397 		aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT);
6398 		break;
6399 	case AAC_IOP_RESET_ABNORMAL:
6400 		aac_start_waiting_io(softs);
6401 	}
6402 }
6403 
6404 /*
6405  * The following function comes from Adaptec:
6406  *
6407  * Time sync. command added to synchronize time with firmware every 30
6408  * minutes (required for correct AIF timestamps etc.)
6409  */
6410 static int
6411 aac_sync_tick(struct aac_softstate *softs)
6412 {
6413 	ddi_acc_handle_t acc;
6414 	int rval;
6415 
6416 	/* Time sync. with firmware every AAC_SYNC_TICK */
6417 	(void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
6418 	acc = softs->sync_ac.slotp->fib_acc_handle;
6419 
6420 	ddi_put32(acc, (void *)&softs->sync_ac.slotp->fibp->data[0],
6421 	    ddi_get_time());
6422 	rval = aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t));
6423 	aac_sync_fib_slot_release(softs, &softs->sync_ac);
6424 	return (rval);
6425 }
6426 
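/*
 * Periodic timer routine: check busy commands for timeout and sync time
 * with the firmware
 */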
6427 static void
6428 aac_daemon(void *arg)
6429 {
6430 	struct aac_softstate *softs = (struct aac_softstate *)arg;
6431 	struct aac_cmd *acp;
6432 
6433 	DBCALLED(softs, 2);
6434 
6435 	mutex_enter(&softs->io_lock);
6436 	/* Check slot for timeout pkts */
6437 	aac_timebase += aac_tick;
6438 	for (acp = softs->q_busy.q_head; acp; acp = acp->next) {
6439 		if (acp->timeout) {
6440 			if (acp->timeout <= aac_timebase) {
6441 				aac_cmd_timeout(softs, acp);
6442 				ddi_trigger_softintr(softs->softint_id);
6443 			}
6444 			break;
6445 		}
6446 	}
6447 
6448 	/* Time sync. with firmware every AAC_SYNC_TICK */
6449 	if (aac_sync_time <= aac_timebase) {
6450 		aac_sync_time = aac_timebase;
6451 		if (aac_sync_tick(softs) != AACOK)
6452 			aac_sync_time += aac_tick << 1; /* retry shortly */
6453 		else
6454 			aac_sync_time += AAC_SYNC_TICK;
6455 	}
6456 
6457 	if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0))
6458 		softs->timeout_id = timeout(aac_daemon, (void *)softs,
6459 		    (aac_tick * drv_usectohz(1000000)));
6460 	mutex_exit(&softs->io_lock);
6461 }
6462 
6463 /*
6464  * Architecture dependent functions
6465  */
6466 static int
6467 aac_rx_get_fwstatus(struct aac_softstate *softs)
6468 {
6469 	return (PCI_MEM_GET32(softs, AAC_OMR0));
6470 }
6471 
6472 static int
6473 aac_rx_get_mailbox(struct aac_softstate *softs, int mb)
6474 {
6475 	return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4));
6476 }
6477 
6478 static void
6479 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6480     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6481 {
6482 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd);
6483 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0);
6484 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1);
6485 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2);
6486 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3);
6487 }
6488 
6489 static int
6490 aac_rkt_get_fwstatus(struct aac_softstate *softs)
6491 {
6492 	return (PCI_MEM_GET32(softs, AAC_OMR0));
6493 }
6494 
6495 static int
6496 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb)
6497 {
6498 	return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb * 4));
6499 }
6500 
6501 static void
6502 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6503     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6504 {
6505 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd);
6506 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0);
6507 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1);
6508 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2);
6509 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3);
6510 }
6511 
6512 /*
6513  * cb_ops functions
6514  */
6515 static int
6516 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred)
6517 {
6518 	struct aac_softstate *softs;
6519 	int minor0, minor;
6520 	int instance;
6521 
6522 	DBCALLED(NULL, 2);
6523 
6524 	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6525 		return (EINVAL);
6526 
6527 	minor0 = getminor(*devp);
6528 	minor = AAC_SCSA_MINOR(minor0);
6529 
6530 	if (AAC_IS_SCSA_NODE(minor))
6531 		return (scsi_hba_open(devp, flag, otyp, cred));
6532 
6533 	instance = MINOR2INST(minor0);
6534 	if (instance >= AAC_MAX_ADAPTERS)
6535 		return (ENXIO);
6536 
6537 	softs = ddi_get_soft_state(aac_softstatep, instance);
6538 	if (softs == NULL)
6539 		return (ENXIO);
6540 
6541 	return (0);
6542 }
6543 
6544 /*ARGSUSED*/
6545 static int
6546 aac_close(dev_t dev, int flag, int otyp, cred_t *cred)
6547 {
6548 	int minor0, minor;
6549 	int instance;
6550 
6551 	DBCALLED(NULL, 2);
6552 
6553 	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6554 		return (EINVAL);
6555 
6556 	minor0 = getminor(dev);
6557 	minor = AAC_SCSA_MINOR(minor0);
6558 
6559 	if (AAC_IS_SCSA_NODE(minor))
6560 		return (scsi_hba_close(dev, flag, otyp, cred));
6561 
6562 	instance = MINOR2INST(minor0);
6563 	if (instance >= AAC_MAX_ADAPTERS)
6564 		return (ENXIO);
6565 
6566 	return (0);
6567 }
6568 
6569 static int
6570 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p,
6571     int *rval_p)
6572 {
6573 	struct aac_softstate *softs;
6574 	int minor0, minor;
6575 	int instance;
6576 
6577 	DBCALLED(NULL, 2);
6578 
6579 	if (drv_priv(cred_p) != 0)
6580 		return (EPERM);
6581 
6582 	minor0 = getminor(dev);
6583 	minor = AAC_SCSA_MINOR(minor0);
6584 
6585 	if (AAC_IS_SCSA_NODE(minor))
6586 		return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p));
6587 
6588 	instance = MINOR2INST(minor0);
6589 	if (instance < AAC_MAX_ADAPTERS) {
6590 		softs = ddi_get_soft_state(aac_softstatep, instance);
6591 		return (aac_do_ioctl(softs, dev, cmd, arg, flag));
6592 	}
6593 	return (ENXIO);
6594 }
6595 
6596 /*
6597  * The IO fault service error handling callback function
6598  */
6599 /*ARGSUSED*/
6600 static int
6601 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6602 {
6603 	/*
6604 	 * as the driver can always deal with an error in any dma or
6605 	 * access handle, we can just return the fme_status value.
6606 	 */
6607 	pci_ereport_post(dip, err, NULL);
6608 	return (err->fme_status);
6609 }
6610 
6611 /*
6612  * aac_fm_init - initialize fma capabilities and register with IO
6613  *               fault services.
6614  */
6615 static void
6616 aac_fm_init(struct aac_softstate *softs)
6617 {
6618 	/*
6619 	 * Need to change iblock to priority for new MSI intr
6620 	 */
6621 	ddi_iblock_cookie_t fm_ibc;
6622 
6623 	softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
6624 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
6625 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
6626 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
6627 
6628 	/* Only register with IO Fault Services if we have some capability */
6629 	if (softs->fm_capabilities) {
6630 		/* Adjust access and dma attributes for FMA */
6631 		softs->reg_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6632 		softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6633 		softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6634 
6635 		/*
6636 		 * Register capabilities with IO Fault Services.
6637 		 * fm_capabilities will be updated to indicate
6638 		 * capabilities actually supported (not requested.)
6639 		 */
6640 		ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc);
6641 
6642 		/*
6643 		 * Initialize pci ereport capabilities if ereport
6644 		 * capable (should always be.)
6645 		 */
6646 		if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6647 		    DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6648 			pci_ereport_setup(softs->devinfo_p);
6649 		}
6650 
6651 		/*
6652 		 * Register error callback if error callback capable.
6653 		 */
6654 		if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6655 			ddi_fm_handler_register(softs->devinfo_p,
6656 			    aac_fm_error_cb, (void *) softs);
6657 		}
6658 	}
6659 }
6660 
6661 /*
6662  * aac_fm_fini - Releases fma capabilities and un-registers with IO
6663  *               fault services.
6664  */
6665 static void
6666 aac_fm_fini(struct aac_softstate *softs)
6667 {
6668 	/* Only unregister FMA capabilities if registered */
6669 	if (softs->fm_capabilities) {
6670 		/*
6671 		 * Un-register error callback if error callback capable.
6672 		 */
6673 		if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6674 			ddi_fm_handler_unregister(softs->devinfo_p);
6675 		}
6676 
6677 		/*
6678 		 * Release any resources allocated by pci_ereport_setup()
6679 		 */
6680 		if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6681 		    DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6682 			pci_ereport_teardown(softs->devinfo_p);
6683 		}
6684 
6685 		/* Unregister from IO Fault Services */
6686 		ddi_fm_fini(softs->devinfo_p);
6687 
6688 		/* Adjust access and dma attributes for FMA */
6689 		softs->reg_attr.devacc_attr_access = DDI_DEFAULT_ACC;
6690 		softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6691 		softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6692 	}
6693 }
6694 
6695 int
6696 aac_check_acc_handle(ddi_acc_handle_t handle)
6697 {
6698 	ddi_fm_error_t de;
6699 
6700 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
6701 	return (de.fme_status);
6702 }
6703 
6704 int
6705 aac_check_dma_handle(ddi_dma_handle_t handle)
6706 {
6707 	ddi_fm_error_t de;
6708 
6709 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
6710 	return (de.fme_status);
6711 }
6712 
6713 void
6714 aac_fm_ereport(struct aac_softstate *softs, char *detail)
6715 {
6716 	uint64_t ena;
6717 	char buf[FM_MAX_CLASS];
6718 
6719 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6720 	ena = fm_ena_generate(0, FM_ENA_FMT1);
6721 	if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) {
6722 		ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP,
6723 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
6724 	}
6725 }
6726 
6727 /*
6728  * Autoconfiguration support
6729  */
6730 static int
6731 aac_parse_devname(char *devnm, int *tgt, int *lun)
6732 {
6733 	char devbuf[SCSI_MAXNAMELEN];
6734 	char *addr;
6735 	char *p,  *tp, *lp;
6736 	long num;
6737 
6738 	/* Parse dev name and address */
6739 	(void) strcpy(devbuf, devnm);
6740 	addr = "";
6741 	for (p = devbuf; *p != '\0'; p++) {
6742 		if (*p == '@') {
6743 			addr = p + 1;
6744 			*p = '\0';
6745 		} else if (*p == ':') {
6746 			*p = '\0';
6747 			break;
6748 		}
6749 	}
6750 
6751 	/* Parse target and lun */
6752 	for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
6753 		if (*p == ',') {
6754 			lp = p + 1;
6755 			*p = '\0';
6756 			break;
6757 		}
6758 	}
6759 	if (tgt && tp) {
6760 		if (ddi_strtol(tp, NULL, 0x10, &num))
6761 			return (AACERR);
6762 		*tgt = (int)num;
6763 	}
6764 	if (lun && lp) {
6765 		if (ddi_strtol(lp, NULL, 0x10, &num))
6766 			return (AACERR);
6767 		*lun = (int)num;
6768 	}
6769 	return (AACOK);
6770 }
6771 
6772 static dev_info_t *
6773 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun)
6774 {
6775 	dev_info_t *child = NULL;
6776 	char addr[SCSI_MAXNAMELEN];
6777 	char tmp[MAXNAMELEN];
6778 
6779 	if (tgt < AAC_MAX_LD) {
6780 		if (lun == 0) {
6781 			struct aac_device *dvp = &softs->containers[tgt].dev;
6782 
6783 			child = dvp->dip;
6784 		}
6785 	} else {
6786 		(void) sprintf(addr, "%x,%x", tgt, lun);
6787 		for (child = ddi_get_child(softs->devinfo_p);
6788 		    child; child = ddi_get_next_sibling(child)) {
6789 			/* We don't care about non-persistent nodes */
6790 			if (ndi_dev_is_persistent_node(child) == 0)
6791 				continue;
6792 
6793 			if (aac_name_node(child, tmp, MAXNAMELEN) !=
6794 			    DDI_SUCCESS)
6795 				continue;
6796 			if (strcmp(addr, tmp) == 0)
6797 				break;
6798 		}
6799 	}
6800 	return (child);
6801 }
6802 
6803 static int
6804 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd,
6805     dev_info_t **dipp)
6806 {
6807 	char *nodename = NULL;
6808 	char **compatible = NULL;
6809 	int ncompatible = 0;
6810 	char *childname;
6811 	dev_info_t *ldip = NULL;
6812 	int tgt = sd->sd_address.a_target;
6813 	int lun = sd->sd_address.a_lun;
6814 	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
6815 	int rval;
6816 
6817 	DBCALLED(softs, 2);
6818 
6819 	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
6820 	    NULL, &nodename, &compatible, &ncompatible);
6821 	if (nodename == NULL) {
6822 		AACDB_PRINT(softs, CE_WARN,
6823 		    "found no compatible driver for t%dL%d", tgt, lun);
6824 		rval = NDI_FAILURE;
6825 		goto finish;
6826 	}
6827 	childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename;
6828 
6829 	/* Create dev node */
6830 	rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID,
6831 	    &ldip);
6832 	if (rval == NDI_SUCCESS) {
6833 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
6834 		    != DDI_PROP_SUCCESS) {
6835 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6836 			    "property for t%dL%d (target)", tgt, lun);
6837 			rval = NDI_FAILURE;
6838 			goto finish;
6839 		}
6840 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
6841 		    != DDI_PROP_SUCCESS) {
6842 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6843 			    "property for t%dL%d (lun)", tgt, lun);
6844 			rval = NDI_FAILURE;
6845 			goto finish;
6846 		}
6847 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
6848 		    "compatible", compatible, ncompatible)
6849 		    != DDI_PROP_SUCCESS) {
6850 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6851 			    "property for t%dL%d (compatible)", tgt, lun);
6852 			rval = NDI_FAILURE;
6853 			goto finish;
6854 		}
6855 
6856 		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
6857 		if (rval != NDI_SUCCESS) {
6858 			AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d",
6859 			    tgt, lun);
6860 			ndi_prop_remove_all(ldip);
6861 			(void) ndi_devi_free(ldip);
			ldip = NULL;
6862 		}
6863 	}
6864 finish:
6865 	if (dipp)
6866 		*dipp = ldip;
6867 
6868 	scsi_hba_nodename_compatible_free(nodename, compatible);
6869 	return (rval);
6870 }
6871 
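/*
 * Probe a single LUN. Logical containers (tgt < AAC_MAX_LD) are
 * verified with aac_probe_container() plus scsi_hba_probe(); for
 * physical targets only non-DASD devices (CD-ROM, tape and SES) are
 * exposed, and the matching nondasd entry is marked valid.
 */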
6872 /*ARGSUSED*/
6873 static int
6874 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd)
6875 {
6876 	int tgt = sd->sd_address.a_target;
6877 	int lun = sd->sd_address.a_lun;
6878 
6879 	DBCALLED(softs, 2);
6880 
6881 	if (tgt < AAC_MAX_LD) {
6882 		int rval;
6883 
6884 		if (lun == 0) {
6885 			mutex_enter(&softs->io_lock);
6886 			rval = aac_probe_container(softs, tgt);
6887 			mutex_exit(&softs->io_lock);
6888 			if (rval == AACOK) {
6889 				if (scsi_hba_probe(sd, NULL) ==
6890 				    SCSIPROBE_EXISTS)
6891 					return (NDI_SUCCESS);
6892 			}
6893 		}
6894 		return (NDI_FAILURE);
6895 	} else {
6896 		int dtype;
6897 
6898 		if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS)
6899 			return (NDI_FAILURE);
6900 
6901 		dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
6902 
6903 		AACDB_PRINT(softs, CE_NOTE,
6904 		    "Phys. device found: tgt %d dtype %d: %s",
6905 		    tgt, dtype, sd->sd_inq->inq_vid);
6906 
6907 		/* Only non-DASD exposed */
6908 		if (dtype != DTYPE_RODIRECT /* CDROM */ &&
6909 		    dtype != DTYPE_SEQUENTIAL /* TAPE */ &&
6910 		    dtype != DTYPE_ESI /* SES */)
6911 			return (NDI_FAILURE);
6912 
6913 		AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt);
6914 		mutex_enter(&softs->io_lock);
6915 		softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID;
6916 		mutex_exit(&softs->io_lock);
6917 		return (NDI_SUCCESS);
6918 	}
6919 }
6920 
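/*
 * Configure one LUN: return the existing child node if there is one,
 * otherwise probe the LUN and create the child node. The INQUIRY
 * buffer allocated during the probe is freed here because
 * scsi_unprobe() no longer does it.
 */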
6921 static int
6922 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun,
6923     dev_info_t **ldip)
6924 {
6925 	struct scsi_device sd;
6926 	dev_info_t *child;
6927 	int rval;
6928 
6929 	DBCALLED(softs, 2);
6930 
6931 	if ((child = aac_find_child(softs, tgt, lun)) != NULL) {
6932 		if (ldip)
6933 			*ldip = child;
6934 		return (NDI_SUCCESS);
6935 	}
6936 
6937 	bzero(&sd, sizeof (struct scsi_device));
6938 	sd.sd_address.a_hba_tran = softs->hba_tran;
6939 	sd.sd_address.a_target = (uint16_t)tgt;
6940 	sd.sd_address.a_lun = (uint8_t)lun;
6941 	if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS)
6942 		rval = aac_config_child(softs, &sd, ldip);
6943 	/* scsi_unprobe() is a no-op now, so free the inquiry buffer manually */
6944 	if (sd.sd_inq) {
6945 		kmem_free(sd.sd_inq, SUN_INQSIZE);
6946 		sd.sd_inq = (struct scsi_inquiry *)NULL;
6947 	}
6948 	return (rval);
6949 }
6950 
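/*
 * Discover and configure the LUNs behind a physical target using
 * SCMD_REPORT_LUNS, falling back to LUN 0 if the command is not
 * supported. Returns the number of LUNs configured.
 */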
6951 static int
6952 aac_config_tgt(struct aac_softstate *softs, int tgt)
6953 {
6954 	struct scsi_address ap;
6955 	struct buf *bp = NULL;
6956 	int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE;
6957 	int list_len = 0;
6958 	int lun_total = 0;
6959 	dev_info_t *ldip;
6960 	int i;
6961 
6962 	ap.a_hba_tran = softs->hba_tran;
6963 	ap.a_target = (uint16_t)tgt;
6964 	ap.a_lun = 0;
6965 
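	/*
	 * Two passes at most: the first uses a minimal buffer to learn
	 * the LUN list length; if the list does not fit, the buffer is
	 * reallocated and REPORT LUNS is issued again.
	 */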
6966 	for (i = 0; i < 2; i++) {
6967 		struct scsi_pkt *pkt;
6968 		uchar_t *cdb;
6969 		uchar_t *p;
6970 		uint32_t data;
6971 
6972 		if (bp == NULL) {
6973 			if ((bp = scsi_alloc_consistent_buf(&ap, NULL,
6974 			    buf_len, B_READ, NULL_FUNC, NULL)) == NULL)
6975 				return (AACERR);
6976 		}
6977 		if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5,
6978 		    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
6979 		    NULL, NULL)) == NULL) {
6980 			scsi_free_consistent_buf(bp);
6981 			return (AACERR);
6982 		}
6983 		cdb = pkt->pkt_cdbp;
6984 		bzero(cdb, CDB_GROUP5);
6985 		cdb[0] = SCMD_REPORT_LUNS;
6986 
6987 		/* Store the allocation length into CDB bytes 6-9 (big-endian) */
6988 		data = buf_len;
6989 		for (p = &cdb[9]; p > &cdb[5]; p--) {
6990 			*p = data & 0xff;
6991 			data >>= 8;
6992 		}
6993 
6994 		if (scsi_poll(pkt) < 0 ||
6995 		    ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) {
6996 			scsi_destroy_pkt(pkt);
6997 			break;
6998 		}
6999 
7000 		/* Extract the big-endian LUN list length from the returned data */
7001 		for (p = (uchar_t *)bp->b_un.b_addr;
7002 		    p < (uchar_t *)bp->b_un.b_addr + 4; p++) {
7003 			data <<= 8;
7004 			data |= *p;
7005 		}
7006 		list_len = data;
7007 		if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) {
7008 			scsi_free_consistent_buf(bp);
7009 			bp = NULL;
7010 			buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE;
7011 		}
7012 		scsi_destroy_pkt(pkt);
7013 	}
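	/*
	 * i >= 2 means both REPORT LUNS passes completed: walk the
	 * returned LUN list and configure each LUN; otherwise assume a
	 * single LUN 0.
	 */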
7014 	if (i >= 2) {
7015 		uint8_t *buf = (uint8_t *)(bp->b_un.b_addr +
7016 		    AAC_SCSI_RPTLUNS_HEAD_SIZE);
7017 
7018 		for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) {
7019 			uint16_t lun;
7020 
7021 			/* Determine report luns addressing type */
7022 			switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) {
7023 			/*
7024 			 * Vendors in the field have been found to be
7025 			 * concatenating bus/target/lun to equal the
7026 			 * complete lun value instead of switching to
7027 			 * flat space addressing
7028 			 */
7029 			case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL:
7030 			case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT:
7031 			case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE:
7032 				lun = ((buf[0] & 0x3f) << 8) | buf[1];
7033 				if (lun > UINT8_MAX) {
7034 					AACDB_PRINT(softs, CE_WARN,
7035 					    "abnormal lun number: %d", lun);
7036 					break;
7037 				}
7038 				if (aac_config_lun(softs, tgt, lun, &ldip) ==
7039 				    NDI_SUCCESS)
7040 					lun_total++;
7041 				break;
7042 			}
7043 
7044 			buf += AAC_SCSI_RPTLUNS_ADDR_SIZE;
7045 		}
7046 	} else {
7047 		/* The target may not support SCMD_REPORT_LUNS. */
7048 		if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS)
7049 			lun_total++;
7050 	}
7051 	scsi_free_consistent_buf(bp);
7052 	return (lun_total);
7053 }
7054 
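/*
 * Set or clear the AAC_DFLAG_CONFIGURING flag for a target under
 * io_lock; used to bracket bus config operations on that target.
 */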
7055 static void
7056 aac_devcfg(struct aac_softstate *softs, int tgt, int en)
7057 {
7058 	struct aac_device *dvp;
7059 
7060 	mutex_enter(&softs->io_lock);
7061 	dvp = AAC_DEV(softs, tgt);
7062 	if (en)
7063 		dvp->flags |= AAC_DFLAG_CONFIGURING;
7064 	else
7065 		dvp->flags &= ~AAC_DFLAG_CONFIGURING;
7066 	mutex_exit(&softs->io_lock);
7067 }
7068 
7069 static int
7070 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op,
7071     void *arg, dev_info_t **childp)
7072 {
7073 	struct aac_softstate *softs;
7074 	int circ = 0;
7075 	int rval;
7076 
7077 	if ((softs = ddi_get_soft_state(aac_softstatep,
7078 	    ddi_get_instance(parent))) == NULL)
7079 		return (NDI_FAILURE);
7080 
7081 	/* Bus config commands must be rejected while the bus is quiesced */
7082 	mutex_enter(&softs->io_lock);
7083 	if (softs->state & AAC_STATE_QUIESCED) {
7084 		AACDB_PRINT(softs, CE_NOTE,
7085 		    "bus_config abroted because bus is quiesced");
7086 		mutex_exit(&softs->io_lock);
7087 		return (NDI_FAILURE);
7088 	}
7089 	mutex_exit(&softs->io_lock);
7090 
7091 	DBCALLED(softs, 1);
7092 
7093 	/* Hold the nexus across the bus_config */
7094 	ndi_devi_enter(parent, &circ);
7095 	switch (op) {
7096 	case BUS_CONFIG_ONE: {
7097 		int tgt, lun;
7098 
7099 		if (aac_parse_devname(arg, &tgt, &lun) != AACOK) {
7100 			rval = NDI_FAILURE;
7101 			break;
7102 		}
7103 
7104 		AAC_DEVCFG_BEGIN(softs, tgt);
7105 		rval = aac_config_lun(softs, tgt, lun, childp);
7106 		AAC_DEVCFG_END(softs, tgt);
7107 		break;
7108 	}
7109 
7110 	case BUS_CONFIG_DRIVER:
7111 	case BUS_CONFIG_ALL: {
7112 		uint32_t bus, tgt;
7113 		int index, total;
7114 
7115 		for (tgt = 0; tgt < AAC_MAX_LD; tgt++) {
7116 			AAC_DEVCFG_BEGIN(softs, tgt);
7117 			(void) aac_config_lun(softs, tgt, 0, NULL);
7118 			AAC_DEVCFG_END(softs, tgt);
7119 		}
7120 
7121 		/* Config the non-DASD devices connected to the card */
7122 		total = 0;
7123 		index = AAC_MAX_LD;
7124 		for (bus = 0; bus < softs->bus_max; bus++) {
7125 			AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus);
7126 			for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) {
7127 				AAC_DEVCFG_BEGIN(softs, index);
7128 				if (aac_config_tgt(softs, index))
7129 					total++;
7130 				AAC_DEVCFG_END(softs, index);
7131 			}
7132 		}
7133 		AACDB_PRINT(softs, CE_CONT,
7134 		    "?Total %d phys. device(s) found", total);
7135 		rval = NDI_SUCCESS;
7136 		break;
7137 	}
	default:
		rval = NDI_FAILURE;
		break;
7138 	}
7139 
7140 	if (rval == NDI_SUCCESS)
7141 		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7142 	ndi_devi_exit(parent, circ);
7143 	return (rval);
7144 }
7145 
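/*
 * Taskq callback for dynamic reconfiguration events: online the
 * devinfo node of a target that has become valid, or reset the
 * adapter and offline the node of a target that has gone away.
 */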
7146 static void
7147 aac_handle_dr(struct aac_drinfo *drp)
7148 {
7149 	struct aac_softstate *softs = drp->softs;
7150 	struct aac_device *dvp;
7151 	dev_info_t *dip;
7152 	int valid;
7153 	int circ1 = 0;
7154 
7155 	DBCALLED(softs, 1);
7156 
7157 	/* Snapshot the device state under io_lock */
7158 	mutex_enter(&softs->io_lock);
7159 	dvp = AAC_DEV(softs, drp->tgt);
7160 	valid = AAC_DEV_IS_VALID(dvp);
7161 	dip = dvp->dip;
7162 	mutex_exit(&softs->io_lock);
7163 
7164 	switch (drp->event) {
7165 	case AAC_EVT_ONLINE:
7166 	case AAC_EVT_OFFLINE:
7167 		/* Device onlined */
7168 		if (dip == NULL && valid) {
7169 			ndi_devi_enter(softs->devinfo_p, &circ1);
7170 			(void) aac_config_lun(softs, drp->tgt, 0, NULL);
7171 			AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined",
7172 			    softs->instance, drp->tgt, drp->lun);
7173 			ndi_devi_exit(softs->devinfo_p, circ1);
7174 		}
7175 		/* Device offlined */
7176 		if (dip && !valid) {
7177 			mutex_enter(&softs->io_lock);
7178 			(void) aac_do_reset(softs);
7179 			mutex_exit(&softs->io_lock);
7180 
7181 			(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7182 			AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined",
7183 			    softs->instance, drp->tgt, drp->lun);
7184 		}
7185 		break;
7186 	}
7187 	kmem_free(drp, sizeof (struct aac_drinfo));
7188 }
7189 
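/*
 * Queue a DR event for asynchronous handling by aac_handle_dr() on
 * the driver taskq; allocation and dispatch are NOSLEEP so this can
 * be called from contexts that must not block.
 */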
7190 static int
7191 aac_dr_event(struct aac_softstate *softs, int tgt, int lun, int event)
7192 {
7193 	struct aac_drinfo *drp;
7194 
7195 	DBCALLED(softs, 1);
7196 
7197 	if (softs->taskq == NULL ||
7198 	    (drp = kmem_zalloc(sizeof (struct aac_drinfo), KM_NOSLEEP)) == NULL)
7199 		return (AACERR);
7200 
7201 	drp->softs = softs;
7202 	drp->tgt = tgt;
7203 	drp->lun = lun;
7204 	drp->event = event;
7205 	if ((ddi_taskq_dispatch(softs->taskq, (void (*)(void *))aac_handle_dr,
7206 	    drp, DDI_NOSLEEP)) != DDI_SUCCESS) {
7207 		AACDB_PRINT(softs, CE_WARN, "DR task start failed");
7208 		kmem_free(drp, sizeof (struct aac_drinfo));
7209 		return (AACERR);
7210 	}
7211 	return (AACOK);
7212 }
7213 
7214 #ifdef DEBUG
7215 
7216 /* -------------------------debug aid functions-------------------------- */
7217 
7218 #define	AAC_FIB_CMD_KEY_STRINGS \
7219 	TestCommandResponse, "TestCommandResponse", \
7220 	TestAdapterCommand, "TestAdapterCommand", \
7221 	LastTestCommand, "LastTestCommand", \
7222 	ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \
7223 	ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \
7224 	ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \
7225 	ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \
7226 	ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \
7227 	ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \
7228 	ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \
7229 	ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \
7230 	InterfaceShutdown, "InterfaceShutdown", \
7231 	DmaCommandFib, "DmaCommandFib", \
7232 	StartProfile, "StartProfile", \
7233 	TermProfile, "TermProfile", \
7234 	SpeedTest, "SpeedTest", \
7235 	TakeABreakPt, "TakeABreakPt", \
7236 	RequestPerfData, "RequestPerfData", \
7237 	SetInterruptDefTimer, "SetInterruptDefTimer", \
7238 	SetInterruptDefCount, "SetInterruptDefCount", \
7239 	GetInterruptDefStatus, "GetInterruptDefStatus", \
7240 	LastCommCommand, "LastCommCommand", \
7241 	NuFileSystem, "NuFileSystem", \
7242 	UFS, "UFS", \
7243 	HostFileSystem, "HostFileSystem", \
7244 	LastFileSystemCommand, "LastFileSystemCommand", \
7245 	ContainerCommand, "ContainerCommand", \
7246 	ContainerCommand64, "ContainerCommand64", \
7247 	ClusterCommand, "ClusterCommand", \
7248 	ScsiPortCommand, "ScsiPortCommand", \
7249 	ScsiPortCommandU64, "ScsiPortCommandU64", \
7250 	AifRequest, "AifRequest", \
7251 	CheckRevision, "CheckRevision", \
7252 	FsaHostShutdown, "FsaHostShutdown", \
7253 	RequestAdapterInfo, "RequestAdapterInfo", \
7254 	IsAdapterPaused, "IsAdapterPaused", \
7255 	SendHostTime, "SendHostTime", \
7256 	LastMiscCommand, "LastMiscCommand"
7257 
7258 #define	AAC_CTVM_SUBCMD_KEY_STRINGS \
7259 	VM_Null, "VM_Null", \
7260 	VM_NameServe, "VM_NameServe", \
7261 	VM_ContainerConfig, "VM_ContainerConfig", \
7262 	VM_Ioctl, "VM_Ioctl", \
7263 	VM_FilesystemIoctl, "VM_FilesystemIoctl", \
7264 	VM_CloseAll, "VM_CloseAll", \
7265 	VM_CtBlockRead, "VM_CtBlockRead", \
7266 	VM_CtBlockWrite, "VM_CtBlockWrite", \
7267 	VM_SliceBlockRead, "VM_SliceBlockRead", \
7268 	VM_SliceBlockWrite, "VM_SliceBlockWrite", \
7269 	VM_DriveBlockRead, "VM_DriveBlockRead", \
7270 	VM_DriveBlockWrite, "VM_DriveBlockWrite", \
7271 	VM_EnclosureMgt, "VM_EnclosureMgt", \
7272 	VM_Unused, "VM_Unused", \
7273 	VM_CtBlockVerify, "VM_CtBlockVerify", \
7274 	VM_CtPerf, "VM_CtPerf", \
7275 	VM_CtBlockRead64, "VM_CtBlockRead64", \
7276 	VM_CtBlockWrite64, "VM_CtBlockWrite64", \
7277 	VM_CtBlockVerify64, "VM_CtBlockVerify64", \
7278 	VM_CtHostRead64, "VM_CtHostRead64", \
7279 	VM_CtHostWrite64, "VM_CtHostWrite64", \
7280 	VM_NameServe64, "VM_NameServe64"
7281 
7282 #define	AAC_CT_SUBCMD_KEY_STRINGS \
7283 	CT_Null, "CT_Null", \
7284 	CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \
7285 	CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \
7286 	CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \
7287 	CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \
7288 	CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \
7289 	CT_WRITE_MBR, "CT_WRITE_MBR", \
7290 	CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \
7291 	CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \
7292 	CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \
7293 	CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \
7294 	CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \
7295 	CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \
7296 	CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \
7297 	CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \
7298 	CT_READ_MBR, "CT_READ_MBR", \
7299 	CT_READ_PARTITION, "CT_READ_PARTITION", \
7300 	CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \
7301 	CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \
7302 	CT_SLICE_SIZE, "CT_SLICE_SIZE", \
7303 	CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \
7304 	CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \
7305 	CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \
7306 	CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \
7307 	CT_UNMIRROR, "CT_UNMIRROR", \
7308 	CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \
7309 	CT_GEN_MIRROR, "CT_GEN_MIRROR", \
7310 	CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \
7311 	CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \
7312 	CT_MOVE2, "CT_MOVE2", \
7313 	CT_SPLIT, "CT_SPLIT", \
7314 	CT_SPLIT2, "CT_SPLIT2", \
7315 	CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \
7316 	CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \
7317 	CT_RECONFIG, "CT_RECONFIG", \
7318 	CT_BREAK2, "CT_BREAK2", \
7319 	CT_BREAK, "CT_BREAK", \
7320 	CT_MERGE2, "CT_MERGE2", \
7321 	CT_MERGE, "CT_MERGE", \
7322 	CT_FORCE_ERROR, "CT_FORCE_ERROR", \
7323 	CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \
7324 	CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \
7325 	CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \
7326 	CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \
7327 	CT_VOLUME_ADD, "CT_VOLUME_ADD", \
7328 	CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \
7329 	CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \
7330 	CT_COPY_STATUS, "CT_COPY_STATUS", \
7331 	CT_COPY, "CT_COPY", \
7332 	CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \
7333 	CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \
7334 	CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \
7335 	CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \
7336 	CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \
7337 	CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \
7338 	CT_SET, "CT_SET", \
7339 	CT_GET, "CT_GET", \
7340 	CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \
7341 	CT_GET_DELAY, "CT_GET_DELAY", \
7342 	CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \
7343 	CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \
7344 	CT_SCRUB, "CT_SCRUB", \
7345 	CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \
7346 	CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \
7347 	CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \
7348 	CT_PAUSE_IO, "CT_PAUSE_IO", \
7349 	CT_RELEASE_IO, "CT_RELEASE_IO", \
7350 	CT_SCRUB2, "CT_SCRUB2", \
7351 	CT_MCHECK, "CT_MCHECK", \
7352 	CT_CORRUPT, "CT_CORRUPT", \
7353 	CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \
7354 	CT_PROMOTE, "CT_PROMOTE", \
7355 	CT_SET_DEAD, "CT_SET_DEAD", \
7356 	CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \
7357 	CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \
7358 	CT_GET_PARAM, "CT_GET_PARAM", \
7359 	CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \
7360 	CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \
7361 	CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \
7362 	CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \
7363 	CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \
7364 	CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \
7365 	CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \
7366 	CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \
7367 	CT_STOP_DATA, "CT_STOP_DATA", \
7368 	CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \
7369 	CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \
7370 	CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \
7371 	CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \
7372 	CT_GET_TIME, "CT_GET_TIME", \
7373 	CT_READ_DATA, "CT_READ_DATA", \
7374 	CT_CTR, "CT_CTR", \
7375 	CT_CTL, "CT_CTL", \
7376 	CT_DRAINIO, "CT_DRAINIO", \
7377 	CT_RELEASEIO, "CT_RELEASEIO", \
7378 	CT_GET_NVRAM, "CT_GET_NVRAM", \
7379 	CT_GET_MEMORY, "CT_GET_MEMORY", \
7380 	CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \
7381 	CT_ADD_LEVEL, "CT_ADD_LEVEL", \
7382 	CT_NV_ZERO, "CT_NV_ZERO", \
7383 	CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \
7384 	CT_THROTTLE_ON, "CT_THROTTLE_ON", \
7385 	CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \
7386 	CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \
7387 	CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \
7388 	CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \
7389 	CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \
7390 	CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \
7391 	CT_MONITOR, "CT_MONITOR", \
7392 	CT_GEN_MORPH, "CT_GEN_MORPH", \
7393 	CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \
7394 	CT_CACHE_SET, "CT_CACHE_SET", \
7395 	CT_CACHE_STAT, "CT_CACHE_STAT", \
7396 	CT_TRACE_START, "CT_TRACE_START", \
7397 	CT_TRACE_STOP, "CT_TRACE_STOP", \
7398 	CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \
7399 	CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \
7400 	CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \
7401 	CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \
7402 	CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \
7403 	CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \
7404 	CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \
7405 	CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \
7406 	CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \
7407 	CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \
7408 	CT_STOP_DUMPS, "CT_STOP_DUMPS", \
7409 	CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \
7410 	CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \
7411 	CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \
7412 	CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \
7413 	CT_READ_NAME, "CT_READ_NAME", \
7414 	CT_WRITE_NAME, "CT_WRITE_NAME", \
7415 	CT_TOSS_CACHE, "CT_TOSS_CACHE", \
7416 	CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \
7417 	CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \
7418 	CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \
7419 	CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \
7420 	CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \
7421 	CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \
7422 	CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \
7423 	CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \
7424 	CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \
7425 	CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \
7426 	CT_FLUSH, "CT_FLUSH", \
7427 	CT_REBUILD, "CT_REBUILD", \
7428 	CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \
7429 	CT_RESTART, "CT_RESTART", \
7430 	CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \
7431 	CT_TRACE_FLAG, "CT_TRACE_FLAG", \
7432 	CT_RESTART_MORPH, "CT_RESTART_MORPH", \
7433 	CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \
7434 	CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \
7435 	CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \
7436 	CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \
7437 	CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \
7438 	CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \
7439 	CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \
7440 	CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \
7441 	CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \
7442 	CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \
7443 	CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \
7444 	CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \
7445 	CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \
7446 	CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \
7447 	CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \
7448 	CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \
7449 	CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \
7450 	CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \
7451 	CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \
7452 	CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \
7453 	CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \
7454 	CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \
7455 	CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \
7456 	CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \
7457 	CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \
7458 	CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \
7459 	CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \
7460 	CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \
7461 	CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \
7462 	CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \
7463 	CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \
7464 	CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \
7465 	CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \
7466 	CT_IS_CONTAINER_MEATADATA_STANDARD, \
7467 	    "CT_IS_CONTAINER_MEATADATA_STANDARD", \
7468 	CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \
7469 	CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \
7470 	CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \
7471 	CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \
7472 	CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \
7473 	CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \
7474 	CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \
7475 	CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \
7476 	CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \
7477 	CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \
7478 	CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \
7479 	CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \
7480 	CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \
7481 	CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \
7482 	CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \
7483 	CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \
7484 	CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \
7485 	CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \
7486 	CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"
7487 
7488 #define	AAC_CL_SUBCMD_KEY_STRINGS \
7489 	CL_NULL, "CL_NULL", \
7490 	DS_INIT, "DS_INIT", \
7491 	DS_RESCAN, "DS_RESCAN", \
7492 	DS_CREATE, "DS_CREATE", \
7493 	DS_DELETE, "DS_DELETE", \
7494 	DS_ADD_DISK, "DS_ADD_DISK", \
7495 	DS_REMOVE_DISK, "DS_REMOVE_DISK", \
7496 	DS_MOVE_DISK, "DS_MOVE_DISK", \
7497 	DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \
7498 	DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \
7499 	DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \
7500 	DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \
7501 	DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \
7502 	DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \
7503 	DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \
7504 	DS_GET_DRIVES, "DS_GET_DRIVES", \
7505 	DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \
7506 	DS_ONLINE, "DS_ONLINE", \
7507 	DS_OFFLINE, "DS_OFFLINE", \
7508 	DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \
7509 	DS_FSAPRINT, "DS_FSAPRINT", \
7510 	CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \
7511 	CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \
7512 	CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \
7513 	CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \
7514 	CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \
7515 	CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \
7516 	CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \
7517 	CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \
7518 	CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \
7519 	CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \
7520 	CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \
7521 	CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \
7522 	CC_GET_BUSINFO, "CC_GET_BUSINFO", \
7523 	CC_GET_PORTINFO, "CC_GET_PORTINFO", \
7524 	CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \
7525 	CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \
7526 	CQ_QUORUM_OP, "CQ_QUORUM_OP"
7527 
7528 #define	AAC_AIF_SUBCMD_KEY_STRINGS \
7529 	AifCmdEventNotify, "AifCmdEventNotify", \
7530 	AifCmdJobProgress, "AifCmdJobProgress", \
7531 	AifCmdAPIReport, "AifCmdAPIReport", \
7532 	AifCmdDriverNotify, "AifCmdDriverNotify", \
7533 	AifReqJobList, "AifReqJobList", \
7534 	AifReqJobsForCtr, "AifReqJobsForCtr", \
7535 	AifReqJobsForScsi, "AifReqJobsForScsi", \
7536 	AifReqJobReport, "AifReqJobReport", \
7537 	AifReqTerminateJob, "AifReqTerminateJob", \
7538 	AifReqSuspendJob, "AifReqSuspendJob", \
7539 	AifReqResumeJob, "AifReqResumeJob", \
7540 	AifReqSendAPIReport, "AifReqSendAPIReport", \
7541 	AifReqAPIJobStart, "AifReqAPIJobStart", \
7542 	AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \
7543 	AifReqAPIJobFinish, "AifReqAPIJobFinish"
7544 
7545 #define	AAC_IOCTL_SUBCMD_KEY_STRINGS \
7546 	Reserved_IOCTL, "Reserved_IOCTL", \
7547 	GetDeviceHandle, "GetDeviceHandle", \
7548 	BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \
7549 	DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \
7550 	RescanBus, "RescanBus", \
7551 	GetDeviceProbeInfo, "GetDeviceProbeInfo", \
7552 	GetDeviceCapacity, "GetDeviceCapacity", \
7553 	GetContainerProbeInfo, "GetContainerProbeInfo", \
7554 	GetRequestedMemorySize, "GetRequestedMemorySize", \
7555 	GetBusInfo, "GetBusInfo", \
7556 	GetVendorSpecific, "GetVendorSpecific", \
7557 	EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \
7558 	EnhancedGetBusInfo, "EnhancedGetBusInfo", \
7559 	SetupExtendedCounters, "SetupExtendedCounters", \
7560 	GetPerformanceCounters, "GetPerformanceCounters", \
7561 	ResetPerformanceCounters, "ResetPerformanceCounters", \
7562 	ReadModePage, "ReadModePage", \
7563 	WriteModePage, "WriteModePage", \
7564 	ReadDriveParameter, "ReadDriveParameter", \
7565 	WriteDriveParameter, "WriteDriveParameter", \
7566 	ResetAdapter, "ResetAdapter", \
7567 	ResetBus, "ResetBus", \
7568 	ResetBusDevice, "ResetBusDevice", \
7569 	ExecuteSrb, "ExecuteSrb", \
7570 	Create_IO_Task, "Create_IO_Task", \
7571 	Delete_IO_Task, "Delete_IO_Task", \
7572 	Get_IO_Task_Info, "Get_IO_Task_Info", \
7573 	Check_Task_Progress, "Check_Task_Progress", \
7574 	InjectError, "InjectError", \
7575 	GetDeviceDefectCounts, "GetDeviceDefectCounts", \
7576 	GetDeviceDefectInfo, "GetDeviceDefectInfo", \
7577 	GetDeviceStatus, "GetDeviceStatus", \
7578 	ClearDeviceStatus, "ClearDeviceStatus", \
7579 	DiskSpinControl, "DiskSpinControl", \
7580 	DiskSmartControl, "DiskSmartControl", \
7581 	WriteSame, "WriteSame", \
7582 	ReadWriteLong, "ReadWriteLong", \
7583 	FormatUnit, "FormatUnit", \
7584 	TargetDeviceControl, "TargetDeviceControl", \
7585 	TargetChannelControl, "TargetChannelControl", \
7586 	FlashNewCode, "FlashNewCode", \
7587 	DiskCheck, "DiskCheck", \
7588 	RequestSense, "RequestSense", \
7589 	DiskPERControl, "DiskPERControl", \
7590 	Read10, "Read10", \
7591 	Write10, "Write10"
7592 
7593 #define	AAC_AIFEN_KEY_STRINGS \
7594 	AifEnGeneric, "Generic", \
7595 	AifEnTaskComplete, "TaskComplete", \
7596 	AifEnConfigChange, "Config change", \
7597 	AifEnContainerChange, "Container change", \
7598 	AifEnDeviceFailure, "device failed", \
7599 	AifEnMirrorFailover, "Mirror failover", \
7600 	AifEnContainerEvent, "container event", \
7601 	AifEnFileSystemChange, "File system changed", \
7602 	AifEnConfigPause, "Container pause event", \
7603 	AifEnConfigResume, "Container resume event", \
7604 	AifEnFailoverChange, "Failover space assignment changed", \
7605 	AifEnRAID5RebuildDone, "RAID5 rebuild finished", \
7606 	AifEnEnclosureManagement, "Enclosure management event", \
7607 	AifEnBatteryEvent, "battery event", \
7608 	AifEnAddContainer, "Add container", \
7609 	AifEnDeleteContainer, "Delete container", \
7610 	AifEnSMARTEvent, "SMART Event", \
7611 	AifEnBatteryNeedsRecond, "battery needs reconditioning", \
7612 	AifEnClusterEvent, "cluster event", \
7613 	AifEnDiskSetEvent, "disk set event occurred", \
7614 	AifDenMorphComplete, "morph operation completed", \
7615 	AifDenVolumeExtendComplete, "VolumeExtendComplete"
7616 
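/*
 * The following tables map numeric FIB commands, subcommands and AIF
 * event codes (defined by the key-string macros above) to printable
 * names; each table ends with a -1 key and is searched linearly by
 * aac_cmd_name().
 */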
7617 struct aac_key_strings {
7618 	int key;
7619 	char *message;
7620 };
7621 
7622 extern struct scsi_key_strings scsi_cmds[];
7623 
7624 static struct aac_key_strings aac_fib_cmds[] = {
7625 	AAC_FIB_CMD_KEY_STRINGS,
7626 	-1,			NULL
7627 };
7628 
7629 static struct aac_key_strings aac_ctvm_subcmds[] = {
7630 	AAC_CTVM_SUBCMD_KEY_STRINGS,
7631 	-1,			NULL
7632 };
7633 
7634 static struct aac_key_strings aac_ct_subcmds[] = {
7635 	AAC_CT_SUBCMD_KEY_STRINGS,
7636 	-1,			NULL
7637 };
7638 
7639 static struct aac_key_strings aac_cl_subcmds[] = {
7640 	AAC_CL_SUBCMD_KEY_STRINGS,
7641 	-1,			NULL
7642 };
7643 
7644 static struct aac_key_strings aac_aif_subcmds[] = {
7645 	AAC_AIF_SUBCMD_KEY_STRINGS,
7646 	-1,			NULL
7647 };
7648 
7649 static struct aac_key_strings aac_ioctl_subcmds[] = {
7650 	AAC_IOCTL_SUBCMD_KEY_STRINGS,
7651 	-1,			NULL
7652 };
7653 
7654 static struct aac_key_strings aac_aifens[] = {
7655 	AAC_AIFEN_KEY_STRINGS,
7656 	-1,			NULL
7657 };
7658 
7659 /*
7660  * The following function comes from Adaptec:
7661  *
7662  * Get the firmware print buffer parameters from the firmware,
7663  * if the command was successful map in the address.
7664  */
7665 static int
7666 aac_get_fw_debug_buffer(struct aac_softstate *softs)
7667 {
7668 	if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
7669 	    0, 0, 0, 0, NULL) == AACOK) {
7670 		uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
7671 		uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
7672 		uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
7673 		uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);
7674 
7675 		if (mondrv_buf_size) {
7676 			uint32_t offset = mondrv_buf_paddrl - \
7677 			    softs->pci_mem_base_paddr;
7678 
7679 			/*
7680 			 * See if the address is already mapped in, and
7681 			 * if so set it up from the base address
7682 			 */
7683 			if ((mondrv_buf_paddrh == 0) &&
7684 			    (offset + mondrv_buf_size < softs->map_size)) {
7685 				mutex_enter(&aac_prt_mutex);
7686 				softs->debug_buf_offset = offset;
7687 				softs->debug_header_size = mondrv_hdr_size;
7688 				softs->debug_buf_size = mondrv_buf_size;
7689 				softs->debug_fw_flags = 0;
7690 				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
7691 				mutex_exit(&aac_prt_mutex);
7692 
7693 				return (AACOK);
7694 			}
7695 		}
7696 	}
7697 	return (AACERR);
7698 }
7699 
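/*
 * Return non-zero when the given debug flag is set and at least one
 * print sink (firmware buffer or kernel console) is enabled.
 */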
7700 int
7701 aac_dbflag_on(struct aac_softstate *softs, int flag)
7702 {
7703 	int debug_flags = softs ? softs->debug_flags : aac_debug_flags;
7704 
7705 	return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
7706 	    AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
7707 }
7708 
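/*
 * Emit the formatted message in aac_prt_buf via cmn_err(), optionally
 * prefixed with the vendor name/instance header; 'sl' is the leading
 * message level character ('^', '!' or '?') if one was given.
 */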
7709 static void
7710 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
7711 {
7712 	if (noheader) {
7713 		if (sl) {
7714 			aac_fmt[0] = sl;
7715 			cmn_err(lev, aac_fmt, aac_prt_buf);
7716 		} else {
7717 			cmn_err(lev, &aac_fmt[1], aac_prt_buf);
7718 		}
7719 	} else {
7720 		if (sl) {
7721 			aac_fmt_header[0] = sl;
7722 			cmn_err(lev, aac_fmt_header,
7723 			    softs->vendor_name, softs->instance,
7724 			    aac_prt_buf);
7725 		} else {
7726 			cmn_err(lev, &aac_fmt_header[1],
7727 			    softs->vendor_name, softs->instance,
7728 			    aac_prt_buf);
7729 		}
7730 	}
7731 }
7732 
7733 /*
7734  * The following function comes from Adaptec:
7735  *
7736  * Format and print out the data passed in to UART or console
7737  * as specified by debug flags.
7738  */
7739 void
7740 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
7741 {
7742 	va_list args;
7743 	char sl; /* system log character */
7744 
7745 	mutex_enter(&aac_prt_mutex);
7746 	/* Set up parameters and call sprintf function to format the data */
7747 	if (strchr("^!?", fmt[0]) == NULL) {
7748 		sl = 0;
7749 	} else {
7750 		sl = fmt[0];
7751 		fmt++;
7752 	}
7753 	va_start(args, fmt);
7754 	(void) vsprintf(aac_prt_buf, fmt, args);
7755 	va_end(args);
7756 
7757 	/* Make sure the softs structure has been passed in for this section */
7758 	if (softs) {
7759 		if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
7760 		    /* If we are set up for a Firmware print */
7761 		    (softs->debug_buf_size)) {
7762 			uint32_t count, i;
7763 
7764 			/* Make sure the string size is within boundaries */
7765 			count = strlen(aac_prt_buf);
7766 			if (count > softs->debug_buf_size)
7767 				count = (uint16_t)softs->debug_buf_size;
7768 
7769 			/*
7770 			 * Wait for no more than AAC_PRINT_TIMEOUT for the
7771 			 * previous message length to clear (the handshake).
7772 			 */
7773 			for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
7774 				if (!PCI_MEM_GET32(softs,
7775 				    softs->debug_buf_offset + \
7776 				    AAC_FW_DBG_STRLEN_OFFSET))
7777 					break;
7778 
7779 				drv_usecwait(1000);
7780 			}
7781 
7782 			/*
7783 			 * If the length is clear, copy over the message, the
7784 			 * flags, and the length. Make sure the length is the
7785 			 * last because that is the signal for the Firmware to
7786 			 * pick it up.
7787 			 */
7788 			if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \
7789 			    AAC_FW_DBG_STRLEN_OFFSET)) {
7790 				PCI_MEM_REP_PUT8(softs,
7791 				    softs->debug_buf_offset + \
7792 				    softs->debug_header_size,
7793 				    aac_prt_buf, count);
7794 				PCI_MEM_PUT32(softs,
7795 				    softs->debug_buf_offset + \
7796 				    AAC_FW_DBG_FLAGS_OFFSET,
7797 				    softs->debug_fw_flags);
7798 				PCI_MEM_PUT32(softs,
7799 				    softs->debug_buf_offset + \
7800 				    AAC_FW_DBG_STRLEN_OFFSET, count);
7801 			} else {
7802 				cmn_err(CE_WARN, "UART output fail");
7803 				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
7804 			}
7805 		}
7806 
7807 		/*
7808 		 * If the Kernel Debug Print flag is set, send it off
7809 		 * to the Kernel Debugger
7810 		 */
7811 		if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
7812 			aac_cmn_err(softs, lev, sl,
7813 			    (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
7814 	} else {
7815 		/* Driver not initialized yet, no firmware or header output */
7816 		if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
7817 			aac_cmn_err(softs, lev, sl, 1);
7818 	}
7819 	mutex_exit(&aac_prt_mutex);
7820 }
7821 
7822 /*
7823  * Translate command number to description string
7824  */
7825 static char *
7826 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
7827 {
7828 	int i;
7829 
7830 	for (i = 0; cmdlist[i].key != -1; i++) {
7831 		if (cmd == cmdlist[i].key)
7832 			return (cmdlist[i].message);
7833 	}
7834 	return (NULL);
7835 }
7836 
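/*
 * Decode and print a SCSI command: opcode name, LBA and transfer
 * length for the common read/write groups, plus the c/t/L address;
 * pass-through (physical) devices are tagged "(pd)".
 */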
7837 static void
7838 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
7839 {
7840 	struct scsi_pkt *pkt = acp->pkt;
7841 	struct scsi_address *ap = &pkt->pkt_address;
7842 	int is_pd = 0;
7843 	int ctl = ddi_get_instance(softs->devinfo_p);
7844 	int tgt = ap->a_target;
7845 	int lun = ap->a_lun;
7846 	union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp;
7847 	uchar_t cmd = cdbp->scc_cmd;
7848 	char *desc;
7849 
7850 	if (tgt >= AAC_MAX_LD) {
7851 		is_pd = 1;
7852 		ctl = ((struct aac_nondasd *)acp->dvp)->bus;
7853 		tgt = ((struct aac_nondasd *)acp->dvp)->tid;
7854 		lun = 0;
7855 	}
7856 
7857 	if ((desc = aac_cmd_name(cmd,
7858 	    (struct aac_key_strings *)scsi_cmds)) == NULL) {
7859 		aac_printf(softs, CE_NOTE,
7860 		    "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s",
7861 		    cmd, ctl, tgt, lun, is_pd ? "(pd)" : "");
7862 		return;
7863 	}
7864 
7865 	switch (cmd) {
7866 	case SCMD_READ:
7867 	case SCMD_WRITE:
7868 		aac_printf(softs, CE_NOTE,
7869 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7870 		    desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp),
7871 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7872 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7873 		break;
7874 	case SCMD_READ_G1:
7875 	case SCMD_WRITE_G1:
7876 		aac_printf(softs, CE_NOTE,
7877 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7878 		    desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp),
7879 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7880 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7881 		break;
7882 	case SCMD_READ_G4:
7883 	case SCMD_WRITE_G4:
7884 		aac_printf(softs, CE_NOTE,
7885 		    "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s",
7886 		    desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp),
7887 		    GETG4COUNT(cdbp),
7888 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7889 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7890 		break;
7891 	case SCMD_READ_G5:
7892 	case SCMD_WRITE_G5:
7893 		aac_printf(softs, CE_NOTE,
7894 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7895 		    desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp),
7896 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7897 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7898 		break;
7899 	default:
7900 		aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s",
7901 		    desc, ctl, tgt, lun, is_pd ? "(pd)" : "");
7902 	}
7903 }
7904 
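/*
 * Dump a FIB for debugging: the header (when AACDB_FLAGS_FIB_HEADER
 * is set) and a command-specific decode of the payload, filtered by
 * the debug_fib_flags matching the FIB's origin (SCMD, IOCTL, SRB or
 * SYNC).
 */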
7905 void
7906 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp)
7907 {
7908 	struct aac_cmd *acp = slotp->acp;
7909 	struct aac_fib *fibp = slotp->fibp;
7910 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
7911 	uint16_t fib_size;
7912 	uint32_t fib_cmd, sub_cmd;
7913 	char *cmdstr, *subcmdstr;
7914 	char *caller;
7915 	int i;
7916 
7917 	if (acp) {
7918 		if (!(softs->debug_fib_flags & acp->fib_flags))
7919 			return;
7920 		if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD)
7921 			caller = "SCMD";
7922 		else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL)
7923 			caller = "IOCTL";
7924 		else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB)
7925 			caller = "SRB";
7926 		else
7927 			return;
7928 	} else {
7929 		if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC))
7930 			return;
7931 		caller = "SYNC";
7932 	}
7933 
7934 	fib_cmd = ddi_get16(acc, &fibp->Header.Command);
7935 	cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds);
7936 	sub_cmd = (uint32_t)-1;
7937 	subcmdstr = NULL;
7938 
7939 	/* Print FIB header */
7940 	if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) {
7941 		aac_printf(softs, CE_NOTE, "FIB> from %s", caller);
7942 		aac_printf(softs, CE_NOTE, "     XferState  %d",
7943 		    ddi_get32(acc, &fibp->Header.XferState));
7944 		aac_printf(softs, CE_NOTE, "     Command    %d",
7945 		    ddi_get16(acc, &fibp->Header.Command));
7946 		aac_printf(softs, CE_NOTE, "     StructType %d",
7947 		    ddi_get8(acc, &fibp->Header.StructType));
7948 		aac_printf(softs, CE_NOTE, "     Flags      0x%x",
7949 		    ddi_get8(acc, &fibp->Header.Flags));
7950 		aac_printf(softs, CE_NOTE, "     Size       %d",
7951 		    ddi_get16(acc, &fibp->Header.Size));
7952 		aac_printf(softs, CE_NOTE, "     SenderSize %d",
7953 		    ddi_get16(acc, &fibp->Header.SenderSize));
7954 		aac_printf(softs, CE_NOTE, "     SenderAddr 0x%x",
7955 		    ddi_get32(acc, &fibp->Header.SenderFibAddress));
7956 		aac_printf(softs, CE_NOTE, "     RcvrAddr   0x%x",
7957 		    ddi_get32(acc, &fibp->Header.ReceiverFibAddress));
7958 		aac_printf(softs, CE_NOTE, "     SenderData 0x%x",
7959 		    ddi_get32(acc, &fibp->Header.SenderData));
7960 	}
7961 
7962 	/* Print FIB data */
7963 	switch (fib_cmd) {
7964 	case ContainerCommand:
7965 		sub_cmd = ddi_get32(acc,
7966 		    (void *)&(((uint32_t *)(void *)&fibp->data[0])[0]));
7967 		subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds);
7968 		if (subcmdstr == NULL)
7969 			break;
7970 
7971 		switch (sub_cmd) {
7972 		case VM_ContainerConfig: {
7973 			struct aac_Container *pContainer =
7974 			    (struct aac_Container *)fibp->data;
7975 
7976 			fib_cmd = sub_cmd;
7977 			cmdstr = subcmdstr;
7978 			sub_cmd = (uint32_t)-1;
7979 			subcmdstr = NULL;
7980 
7981 			sub_cmd = ddi_get32(acc,
7982 			    &pContainer->CTCommand.command);
7983 			subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds);
7984 			if (subcmdstr == NULL)
7985 				break;
7986 			aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)",
7987 			    subcmdstr,
7988 			    ddi_get32(acc, &pContainer->CTCommand.param[0]),
7989 			    ddi_get32(acc, &pContainer->CTCommand.param[1]),
7990 			    ddi_get32(acc, &pContainer->CTCommand.param[2]));
7991 			return;
7992 		}
7993 
7994 		case VM_Ioctl:
7995 			fib_cmd = sub_cmd;
7996 			cmdstr = subcmdstr;
7997 			sub_cmd = (uint32_t)-1;
7998 			subcmdstr = NULL;
7999 
8000 			sub_cmd = ddi_get32(acc,
8001 			    (void *)&(((uint32_t *)(void *)&fibp->data[0])[4]));
8002 			subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
8003 			break;
8004 
8005 		case VM_CtBlockRead:
8006 		case VM_CtBlockWrite: {
8007 			struct aac_blockread *br =
8008 			    (struct aac_blockread *)fibp->data;
8009 			struct aac_sg_table *sg = &br->SgMap;
8010 			uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8011 
8012 			aac_printf(softs, CE_NOTE,
8013 			    "FIB> %s Container %d  0x%x/%d", subcmdstr,
8014 			    ddi_get32(acc, &br->ContainerId),
8015 			    ddi_get32(acc, &br->BlockNumber),
8016 			    ddi_get32(acc, &br->ByteCount));
8017 			for (i = 0; i < sgcount; i++)
8018 				aac_printf(softs, CE_NOTE,
8019 				    "     %d: 0x%08x/%d", i,
8020 				    ddi_get32(acc, &sg->SgEntry[i].SgAddress),
8021 				    ddi_get32(acc, &sg->SgEntry[i]. \
8022 				    SgByteCount));
8023 			return;
8024 		}
8025 		}
8026 		break;
8027 
8028 	case ContainerCommand64: {
8029 		struct aac_blockread64 *br =
8030 		    (struct aac_blockread64 *)fibp->data;
8031 		struct aac_sg_table64 *sg = &br->SgMap64;
8032 		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8033 		uint64_t sgaddr;
8034 
8035 		sub_cmd = br->Command;
8036 		subcmdstr = NULL;
8037 		if (sub_cmd == VM_CtHostRead64)
8038 			subcmdstr = "VM_CtHostRead64";
8039 		else if (sub_cmd == VM_CtHostWrite64)
8040 			subcmdstr = "VM_CtHostWrite64";
8041 		else
8042 			break;
8043 
8044 		aac_printf(softs, CE_NOTE,
8045 		    "FIB> %s Container %d  0x%x/%d", subcmdstr,
8046 		    ddi_get16(acc, &br->ContainerId),
8047 		    ddi_get32(acc, &br->BlockNumber),
8048 		    ddi_get16(acc, &br->SectorCount));
8049 		for (i = 0; i < sgcount; i++) {
8050 			sgaddr = ddi_get64(acc,
8051 			    &sg->SgEntry64[i].SgAddress);
8052 			aac_printf(softs, CE_NOTE,
8053 			    "     %d: 0x%08x.%08x/%d", i,
8054 			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
8055 			    ddi_get32(acc, &sg->SgEntry64[i]. \
8056 			    SgByteCount));
8057 		}
8058 		return;
8059 	}
8060 
8061 	case RawIo: {
8062 		struct aac_raw_io *io = (struct aac_raw_io *)fibp->data;
8063 		struct aac_sg_tableraw *sg = &io->SgMapRaw;
8064 		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8065 		uint64_t sgaddr;
8066 
8067 		aac_printf(softs, CE_NOTE,
8068 		    "FIB> RawIo Container %d  0x%llx/%d 0x%x",
8069 		    ddi_get16(acc, &io->ContainerId),
8070 		    ddi_get64(acc, &io->BlockNumber),
8071 		    ddi_get32(acc, &io->ByteCount),
8072 		    ddi_get16(acc, &io->Flags));
8073 		for (i = 0; i < sgcount; i++) {
8074 			sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress);
8075 			aac_printf(softs, CE_NOTE, "     %d: 0x%08x.%08x/%d", i,
8076 			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
8077 			    ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount));
8078 		}
8079 		return;
8080 	}
8081 
8082 	case ClusterCommand:
8083 		sub_cmd = ddi_get32(acc,
8084 		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
8085 		subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
8086 		break;
8087 
8088 	case AifRequest:
8089 		sub_cmd = ddi_get32(acc,
8090 		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
8091 		subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
8092 		break;
8093 
8094 	default:
8095 		break;
8096 	}
8097 
8098 	fib_size = ddi_get16(acc, &(fibp->Header.Size));
8099 	if (subcmdstr)
8100 		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
8101 		    subcmdstr, fib_size);
8102 	else if (cmdstr && sub_cmd == (uint32_t)-1)
8103 		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
8104 		    cmdstr, fib_size);
8105 	else if (cmdstr)
8106 		aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
8107 		    cmdstr, sub_cmd, fib_size);
8108 	else
8109 		aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
8110 		    fib_cmd, fib_size);
8111 }
8112 
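/*
 * Decode and print an adapter-initiated FIB (AIF): event
 * notifications are translated via aac_aifens, and job progress
 * reports include their status and tick counts.
 */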
8113 static void
8114 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
8115 {
8116 	int aif_command;
8117 	uint32_t aif_seqnumber;
8118 	int aif_en_type;
8119 	char *str;
8120 
8121 	aif_command = LE_32(aif->command);
8122 	aif_seqnumber = LE_32(aif->seqNumber);
8123 	aif_en_type = LE_32(aif->data.EN.type);
8124 
8125 	switch (aif_command) {
8126 	case AifCmdEventNotify:
8127 		str = aac_cmd_name(aif_en_type, aac_aifens);
8128 		if (str)
8129 			aac_printf(softs, CE_NOTE, "AIF! %s", str);
8130 		else
8131 			aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
8132 			    aif_en_type);
8133 		break;
8134 
8135 	case AifCmdJobProgress:
8136 		switch (LE_32(aif->data.PR[0].status)) {
8137 		case AifJobStsSuccess:
8138 			str = "success"; break;
8139 		case AifJobStsFinished:
8140 			str = "finished"; break;
8141 		case AifJobStsAborted:
8142 			str = "aborted"; break;
8143 		case AifJobStsFailed:
8144 			str = "failed"; break;
8145 		case AifJobStsSuspended:
8146 			str = "suspended"; break;
8147 		case AifJobStsRunning:
8148 			str = "running"; break;
8149 		default:
8150 			str = "unknown"; break;
8151 		}
8152 		aac_printf(softs, CE_NOTE,
8153 		    "AIF! JobProgress (%d) - %s (%d, %d)",
8154 		    aif_seqnumber, str,
8155 		    LE_32(aif->data.PR[0].currentTick),
8156 		    LE_32(aif->data.PR[0].finalTick));
8157 		break;
8158 
8159 	case AifCmdAPIReport:
8160 		aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
8161 		    aif_seqnumber);
8162 		break;
8163 
8164 	case AifCmdDriverNotify:
8165 		aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
8166 		    aif_seqnumber);
8167 		break;
8168 
8169 	default:
8170 		aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
8171 		    aif_command, aif_seqnumber);
8172 		break;
8173 	}
8174 }
8175 
8176 #endif /* DEBUG */
8177