/*
 *       O.S   : Solaris
 *  FILE NAME  : arcmsr.c
 *       BY    : Erich Chen
 *  Description: SCSI RAID Device Driver for
 *               ARECA RAID Host adapter
 *
 *  Copyright (C) 2002,2007 Areca Technology Corporation All rights reserved.
 *  Copyright (C) 2002,2007 Erich Chen
 *	    Web site: www.areca.com.tw
 *	      E-mail: erich@areca.com.tw
 *
 *	Redistribution and use in source and binary forms, with or without
 *	modification, are permitted provided that the following conditions
 *	are met:
 *	1. Redistributions of source code must retain the above copyright
 *	   notice, this list of conditions and the following disclaimer.
 *	2. Redistributions in binary form must reproduce the above copyright
 *	   notice, this list of conditions and the following disclaimer in the
 *	   documentation and/or other materials provided with the distribution.
 *  3. The party using or redistributing the source code and binary forms
 *     agrees to the disclaimer below and the terms and conditions set forth
 *     herein.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/disp.h>
#include <sys/signal.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/policy.h>
#include <sys/atomic.h>

#include "arcmsr.h"

static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp);
static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_reset(struct scsi_address *ap, int level);
static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
    dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
    struct scsi_device *sd);
static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);

static int arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
    dev_info_t **ldip);
static uint_t arcmsr_interrupt(caddr_t arg);
static int arcmsr_initialize(struct ACB *acb);
static int arcmsr_dma_alloc(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
static int arcmsr_dma_move(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp);
static void arcmsr_pcidev_disattach(struct ACB *acb);
static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
static void arcmsr_iop_init(struct ACB *acb);
static void arcmsr_iop_parking(struct ACB *acb);
static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
static void arcmsr_flush_hba_cache(struct ACB *acb);
static void arcmsr_flush_hbb_cache(struct ACB *acb);
static void arcmsr_stop_hba_bgrb(struct ACB *acb);
static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
static void arcmsr_start_hba_bgrb(struct ACB *acb);
static void arcmsr_start_hbb_bgrb(struct ACB *acb);
static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_build_ccb(struct CCB *ccb);
static int arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
static dev_info_t *arcmsr_find_child(struct ACB *acb, uint16_t tgt,
    uint8_t lun);

static struct ACB *ArcMSRHBA[ARCMSR_MAX_ADAPTER];
static int arcmsr_hba_count;
static void *arcmsr_soft_state = NULL;
static kmutex_t arcmsr_global_mutex;

#define	MSR_MINOR	32
#define	INST2MSR(x)	(((x) << INST_MINOR_SHIFT) | MSR_MINOR)
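
/*
 * Illustrative note (not from the source): INST2MSR() packs the instance
 * number into the bits above INST_MINOR_SHIFT (defined in arcmsr.h) and
 * tags the low bits with MSR_MINOR, so each instance gets a distinct
 * minor number for its control node.  Assuming MSR_MINOR fits below
 * INST_MINOR_SHIFT, the instance can be recovered with:
 *
 *	int inst = INST2MSR(instance) >> INST_MINOR_SHIFT;
 */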

static ddi_dma_attr_t arcmsr_dma_attr = {
	DMA_ATTR_V0,		/* ddi_dma_attr version */
	0,			/* low DMA address range */
	0xffffffff,		/* high DMA address range */
	0x00ffffff,		/* DMA counter register upper bound */
	1,			/* DMA address alignment requirements */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
	1,			/* minimum effective DMA size */
	ARCMSR_MAX_XFER_LEN,	/* maximum DMA xfer size */
	/*
	 * The dma_attr_seg field supplies the limit of each Scatter/Gather
	 * list element's "address+length".  The Intel IOP331 cannot use
	 * segments that cross this boundary due to its segment boundary
	 * restrictions.
	 */
	0x00ffffff,
	ARCMSR_MAX_SG_ENTRIES,	/* scatter/gather list count */
	1,			/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};
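
/*
 * Worked example (illustrative only, not from the source): with
 * dma_attr_seg = 0x00ffffff, a buffer starting at physical 0x00fff000
 * with length 0x2000 would cross the boundary at 0x01000000, so
 * ddi_dma_buf_bind_handle(9F) splits it into two cookies:
 *
 *	cookie 0: addr 0x00fff000, size 0x1000	(up to the boundary)
 *	cookie 1: addr 0x01000000, size 0x1000	(remainder)
 */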

static ddi_dma_attr_t arcmsr_ccb_attr = {
	DMA_ATTR_V0,	/* ddi_dma_attr version */
	0,		/* low DMA address range */
	0xffffffff,	/* high DMA address range */
	0x00ffffff,	/* DMA counter register upper bound */
	1,		/* default byte alignment */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,   /* burst sizes */
	1,		/* minimum effective DMA size */
	0xffffffff,	/* maximum DMA xfer size */
	0x00ffffff,	/* max segment size, segment boundary restrictions */
	1,		/* scatter/gather list count */
	1,		/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};

static struct cb_ops arcmsr_cb_ops = {
	scsi_hba_open,		/* open(9E) */
	scsi_hba_close,		/* close(9E) */
	nodev,			/* strategy(9E), returns ENXIO */
	nodev,			/* print(9E) */
	nodev,			/* dump(9E) Cannot be used as a dump device */
	nodev,			/* read(9E) */
	nodev,			/* write(9E) */
	arcmsr_cb_ioctl,	/* ioctl(9E) */
	nodev,			/* devmap(9E) */
	nodev,			/* mmap(9E) */
	nodev,			/* segmap(9E) */
	NULL,			/* chpoll(9E) returns ENXIO */
	nodev,			/* prop_op(9E) */
	NULL,			/* streamtab(9S) */
#ifdef _LP64
	/*
	 * cb_ops cb_flag:
	 *	D_NEW | D_MP	compatibility flags, see conf.h
	 *	D_MP		the driver is safe for multi-threaded
	 *			operation
	 *	D_64BIT		the driver properly handles 64-bit offsets
	 */
	D_HOTPLUG | D_MP | D_64BIT,
#else
	D_HOTPLUG | D_MP,
#endif
	CB_REV,
	nodev,			/* aread(9E) */
	nodev			/* awrite(9E) */
};

static struct dev_ops arcmsr_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* reference count */
	nodev,			/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	arcmsr_attach,		/* attach */
	arcmsr_detach,		/* detach */
	arcmsr_reset,		/* reset, shutdown, reboot notify */
	&arcmsr_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nulldev			/* power */
};

char _depends_on[] = "misc/scsi";

static struct modldrv arcmsr_modldrv = {
	&mod_driverops,		/* Type of module. This is a driver. */
	ARCMSR_DRIVER_VERSION,	/* module name, from arcmsr.h */
	&arcmsr_ops,		/* driver ops */
};

static struct modlinkage arcmsr_modlinkage = {
	MODREV_1,
	&arcmsr_modldrv,
	NULL
};

int
_init(void) {
	int ret;

	mutex_init(&arcmsr_global_mutex, "arcmsr global mutex",
	    MUTEX_DRIVER, NULL);
	ret = ddi_soft_state_init(&arcmsr_soft_state,
	    sizeof (struct ACB), ARCMSR_MAX_ADAPTER);
	if (ret != 0) {
		mutex_destroy(&arcmsr_global_mutex);
		return (ret);
	}
	if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
		mutex_destroy(&arcmsr_global_mutex);
		ddi_soft_state_fini(&arcmsr_soft_state);
		return (ret);
	}

	if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
		mutex_destroy(&arcmsr_global_mutex);
		scsi_hba_fini(&arcmsr_modlinkage);
		if (arcmsr_soft_state != NULL) {
			ddi_soft_state_fini(&arcmsr_soft_state);
		}
	}
	return (ret);
}


int
_fini(void) {
	int ret;

	ret = mod_remove(&arcmsr_modlinkage);
	if (ret == 0) {
		/* a zero return means the driver can be unloaded */
		mutex_destroy(&arcmsr_global_mutex);
		scsi_hba_fini(&arcmsr_modlinkage);
		if (arcmsr_soft_state != NULL) {
			ddi_soft_state_fini(&arcmsr_soft_state);
		}
	}
	return (ret);
}


int
_info(struct modinfo *modinfop) {
	return (mod_info(&arcmsr_modlinkage, modinfop));
}


#if defined(ARCMSR_DEBUG)
static void
arcmsr_dump_scsi_cdb(struct scsi_address *ap, struct scsi_pkt *pkt) {

	static char hex[] = "0123456789abcdef";
	struct ACB *acb =
	    (struct ACB *)ap->a_hba_tran->tran_hba_private;
	struct CCB *ccb =
	    (struct CCB *)pkt->pkt_ha_private;
	uint8_t	*cdb = pkt->pkt_cdbp;
	char buf[256];
	char *p;
	int i;

	(void) sprintf(buf, "arcmsr%d: sgcount=%d <%d, %d> "
	    "cdb ",
	    ddi_get_instance(acb->dev_info), ccb->arcmsr_cdb.sgcount,
	    ap->a_target, ap->a_lun);

	p = buf + strlen(buf);
	*p++ = '[';

	for (i = 0; i < ccb->arcmsr_cdb.CdbLength; i++, cdb++) {
		if (i != 0) {
			*p++ = ' ';
		}
		*p++ = hex[(*cdb >> 4) & 0x0f];
		*p++ = hex[*cdb & 0x0f];
	}
	*p++ = ']';
	*p++ = '.';
	*p = 0;
	cmn_err(CE_CONT, "%s", buf);
}
#endif  /* ARCMSR_DEBUG */

static void
arcmsr_devmap_req_timeout(void *arg) {

	struct ACB *acb = (struct ACB *)arg;
	switch (acb->adapter_type) {
	    case ACB_ADAPTER_TYPE_A:
	    {
		    struct HBA_msgUnit *phbamu;

		    phbamu = (struct HBA_msgUnit *)acb->pmu;
		    CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			&phbamu->inbound_msgaddr0,
			ARCMSR_INBOUND_MESG0_GET_CONFIG);
	    }
	    break;
	    case ACB_ADAPTER_TYPE_B:
	    {
		    struct HBB_msgUnit *phbbmu;
		    phbbmu = (struct HBB_msgUnit *)acb->pmu;
		    CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			&phbbmu->hbb_doorbell->drv2iop_doorbell,
			ARCMSR_MESSAGE_GET_CONFIG);
	    }
	    break;
	}

	if ((acb->timeout_sc_id != 0) &&
	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
		/* do device map check every 5 secs */
		acb->timeout_sc_id = timeout(arcmsr_devmap_req_timeout,
		    (void *)acb, (5 * drv_usectohz(1000000)));
	}
}
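
/*
 * Illustrative note (not from the source): timeout(9F) takes its delay in
 * clock ticks, and drv_usectohz(9F) converts microseconds to ticks, so
 * 5 * drv_usectohz(1000000) is five seconds regardless of the kernel's
 * clock rate.  With the common 100Hz clock:
 *
 *	clock_t five_sec = 5 * drv_usectohz(1000000);	 5 * 100 ticks
 */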


static void
arcmsr_ccbs_timeout(void *arg) {

	struct ACB *acb = (struct ACB *)arg;
	struct CCB *ccb;
	int i;
	int current_time = ddi_get_time();

	if (acb->ccboutstandingcount != 0) {
		/* check each ccb */
		i = ddi_dma_sync(acb->ccbs_pool_handle, 0,
		    acb->dma_sync_size, DDI_DMA_SYNC_FORKERNEL);
		if (i != DDI_SUCCESS) {
			if ((acb->timeout_id != 0) &&
			    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
				/* do pkt timeout check every 60 secs */
				acb->timeout_id = timeout(arcmsr_ccbs_timeout,
				    (void *)acb,
				    (60 * drv_usectohz(1000000)));
			}
			return;
		}
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->acb != acb) {
				break;
			}
			if (ccb->startdone == ARCMSR_CCB_DONE) {
				continue;
			}
			if (ccb->pkt == NULL) {
				continue;
			}
			if (ccb->pkt->pkt_time == 0) {
				continue;
			}
			if ((int)ccb->ccb_time >= current_time) {
				continue;
			}
			if (ccb->startdone == ARCMSR_CCB_START) {
				int id = ccb->pkt->pkt_address.a_target;
				int lun = ccb->pkt->pkt_address.a_lun;

				/*
				 * handle outstanding command of timeout ccb
				 */
				ccb->pkt->pkt_reason = CMD_TIMEOUT;
				ccb->pkt->pkt_statistics = STAT_TIMEOUT;

				cmn_err(CE_CONT,
				    "arcmsr%d: scsi target %d lun %d "
				    "outstanding command timeout",
				    ddi_get_instance(acb->dev_info),
				    id, lun);
				cmn_err(CE_CONT,
				    "arcmsr%d: scsi target %d lun %d "
				    "fatal error on target, device is gone",
				    ddi_get_instance(acb->dev_info),
				    id, lun);
				acb->devstate[id][lun] = ARECA_RAID_GONE;
				arcmsr_ccb_complete(ccb, 1);
				acb->timeout_count++;
				continue;
			}
			ccb->ccb_time = (time_t)(ccb->pkt->pkt_time +
			    current_time); /* adjust ccb_time of pending ccb */
		}
	}
	if ((acb->timeout_id != 0) &&
	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
		/* do pkt timeout check every 60 secs */
		acb->timeout_id = timeout(arcmsr_ccbs_timeout,
		    (void *)acb, (60 * drv_usectohz(1000000)));
	}
}
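
/*
 * Worked example of the watchdog above (illustrative numbers): a command
 * issued at t=1000s with pkt_time = 30 carries a deadline of
 * ccb_time = 1000 + 30 = 1030.  A scan at t=1060 finds
 * ccb_time (1030) < current_time (1060); since the CCB is still in
 * ARCMSR_CCB_START state, it is completed with pkt_reason = CMD_TIMEOUT
 * and the target is marked ARECA_RAID_GONE.  A stale CCB that has not
 * been started yet merely has its deadline pushed out to
 * pkt_time + current_time.
 */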


static uint32_t
arcmsr_disable_allintr(struct ACB *acb) {

	uint32_t intmask_org;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct HBA_msgUnit *phbamu =
		    (struct HBA_msgUnit *)acb->pmu;

		/* disable all outbound interrupts */
		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intmask);
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intmask,
		    intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_msgUnit *phbbmu =
		    (struct HBB_msgUnit *)acb->pmu;

		/* save the current doorbell interrupt mask */
		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask);
		/* disable all interrupts */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
		}
		break;
	}
	return (intmask_org);
}


static void
arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org) {

	int mask;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct HBA_msgUnit *phbamu =
		    (struct HBA_msgUnit *)acb->pmu;

		/*
		 * enable the outbound Post Queue, outbound doorbell and
		 * message0 interrupts
		 */
		mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
		    ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
		    ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intmask, intmask_org & mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_msgUnit *phbbmu =
		    (struct HBB_msgUnit *)acb->pmu;

		mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
		    ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE |
		    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		/* 1=interrupt enable, 0=interrupt disable */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
		    intmask_org | mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	}
}
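
/*
 * Worked example of the type-A mask arithmetic above (illustrative bit
 * values): outbound_intmask uses 1 bits to *mask* (disable) sources.
 * If the three INTMASKENABLE bits were 0x01, 0x04 and 0x08, then
 * mask = ~(0x01 | 0x04 | 0x08) = ~0x0d, and writing intmask_org & mask
 * clears exactly those mask bits (enabling the sources) while keeping
 * the rest of the register intact.  outbound_int_enable then records
 * the enabled sources:
 *
 *	~(intmask_org & mask) & 0xff	 1 bits == sources now enabled
 */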


static void
arcmsr_iop_parking(struct ACB *acb) {

	if (acb != NULL) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			uint32_t intmask_org;

			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			/* disable all outbound interrupts */
			intmask_org = arcmsr_disable_allintr(acb);
			if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
				arcmsr_stop_hba_bgrb(acb);
				arcmsr_flush_hba_cache(acb);
			} else {
				arcmsr_stop_hbb_bgrb(acb);
				arcmsr_flush_hbb_cache(acb);
			}
			/*
			 * re-enable the outbound Post Queue and
			 * doorbell interrupts
			 */
			arcmsr_enable_allintr(acb, intmask_org);
		}
	}
}


static int
arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd) {

	struct ACB *acb;
	scsi_hba_tran_t *scsi_hba_transport;

	scsi_hba_transport = (scsi_hba_tran_t *)
	    ddi_get_driver_private(resetdev);

	if (!scsi_hba_transport)
		return (DDI_FAILURE);

	acb = (struct ACB *)
	    scsi_hba_transport->tran_hba_private;

	if (!acb)
		return (DDI_FAILURE);

	if ((cmd == RESET_LUN) ||
	    (cmd == RESET_BUS) ||
	    (cmd == RESET_TARGET))
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: reset op (%d) not supported",
		    ddi_get_instance(resetdev), cmd);

	arcmsr_pcidev_disattach(acb);

	return (DDI_SUCCESS);
}

static int
arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance) {

	scsi_hba_tran_t *hba_trans;
	ddi_device_acc_attr_t dev_acc_attr;
	struct ACB *acb;
	static char buf[256];
	uint16_t wval;
	int raid6 = 1;
	char *type;

	/*
	 * Soft State Structure
	 * The driver should allocate the per-device-instance
	 * soft state structure, being careful to clean up properly if
	 * an error occurs. Allocate data structure.
	 */
	if (ddi_soft_state_zalloc(arcmsr_soft_state, instance)
	    != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi_soft_state_zalloc failed",
		    instance);
		return (DDI_FAILURE);
	}

	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
	if (acb == NULL) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi_get_soft_state failed",
		    instance);
		goto error_level_1;
	}

	/* acb is already zalloc()d so we don't need to bzero() it */
	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;

	acb->dev_info = dev_info;
	acb->dev_acc_attr = dev_acc_attr;

	/*
	 * The driver, if providing DMA, should also check that its hardware is
	 * installed in a DMA-capable slot
	 */
	if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: hardware is not installed in a "
		    "DMA-capable slot",
		    instance);
		goto error_level_0;
	}
	/* We do not support adapter drivers with high-level interrupts */
	if (ddi_intr_hilevel(dev_info, 0) != 0) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: high-level interrupt not supported",
		    instance);
		goto error_level_0;
	}

	if (pci_config_setup(dev_info, &acb->pci_acc_handle)
	    != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_NOTE,
		    "arcmsr%d: pci_config_setup() failed, attach failed",
		    instance);
		return (DDI_PROBE_FAILURE);
	}

	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
	if (wval != PCI_VENDOR_ID_ARECA) {
		arcmsr_log(NULL, CE_NOTE,
		    "arcmsr%d: failing attach: vendorid (0x%04x) "
		    "does not match 0x%04x (PCI_VENDOR_ID_ARECA)\n",
		    instance, wval, PCI_VENDOR_ID_ARECA);
		return (DDI_PROBE_FAILURE);
	}

	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
	switch (wval) {
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1210:
	case PCI_DEVICE_ID_ARECA_1201:
		raid6 = 0;
		/*FALLTHRU*/
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
		type = "SAS";
		break;
	default:
		type = "X-TYPE";
		break;
	}

	(void) sprintf(buf, "Areca %s Host Adapter RAID Controller%s",
	    type, raid6 ? " (RAID6 capable)" : "");
	cmn_err(CE_CONT, "arcmsr%d: %s", instance, buf);
	cmn_err(CE_CONT, "arcmsr%d: %s", instance, ARCMSR_DRIVER_VERSION);

	/* we disable iop interrupt here */
	if (arcmsr_initialize(acb) == DDI_FAILURE) {
		arcmsr_log(NULL, CE_WARN, "arcmsr%d: arcmsr_initialize "
		    "failed", instance);
		goto error_level_1;
	}

	/*
	 * The driver must first obtain the iblock cookie to initialize
	 * mutexes used in the driver handler. Only after those mutexes
	 * have been initialized can the interrupt handler be added.
	 */
	if (ddi_get_iblock_cookie(dev_info, 0, &acb->iblock_cookie)
	    != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN, "arcmsr%d: "
		    "ddi_get_iblock_cookie failed", instance);
		goto error_level_2;
	}
	mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);
	mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);
	mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);
	mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);

	/* Allocate a transport structure */
	hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
	if (hba_trans == NULL) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: scsi_hba_tran_alloc failed",
		    instance);
		goto error_level_3;
	}
	acb->scsi_hba_transport = hba_trans;
	acb->dev_info = dev_info;
	/* init scsi host adapter transport entry */
	hba_trans->tran_hba_private  = acb;
	hba_trans->tran_tgt_private  = NULL;
	/*
	 * If no per-target initialization is required, the HBA can leave
	 * tran_tgt_init set to NULL.
	 */
	hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
	hba_trans->tran_tgt_probe = scsi_hba_probe;
	hba_trans->tran_tgt_free = NULL;
	hba_trans->tran_start = arcmsr_tran_start;
	hba_trans->tran_abort = arcmsr_tran_abort;
	hba_trans->tran_reset = arcmsr_tran_reset;
	hba_trans->tran_getcap = arcmsr_tran_getcap;
	hba_trans->tran_setcap = arcmsr_tran_setcap;
	hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
	hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
	hba_trans->tran_dmafree = arcmsr_tran_dmafree;
	hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;

	hba_trans->tran_reset_notify = NULL;
	hba_trans->tran_get_bus_addr = NULL;
	hba_trans->tran_get_name = NULL;
	hba_trans->tran_quiesce = NULL;
	hba_trans->tran_unquiesce = NULL;
	hba_trans->tran_bus_reset = NULL;
	hba_trans->tran_bus_config = arcmsr_tran_bus_config;
	hba_trans->tran_add_eventcall = NULL;
	hba_trans->tran_get_eventcookie = NULL;
	hba_trans->tran_post_event = NULL;
	hba_trans->tran_remove_eventcall = NULL;

	/* iop init and enable interrupt here */
	mutex_enter(&arcmsr_global_mutex);
	arcmsr_iop_init(acb);
	mutex_exit(&arcmsr_global_mutex);

	/* Adding an Interrupt Handler */
	if (ddi_add_intr(dev_info, 0, &acb->iblock_cookie, 0,
	    arcmsr_interrupt, (caddr_t)acb) != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: failed to add interrupt handler",
		    instance);
		goto error_level_4;
	}
	/*
	 * The driver should attach this instance of the device, and
	 * perform error cleanup if necessary
	 */
	if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
	    hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: scsi_hba_attach_setup failed",
		    instance);
		goto error_level_5;
	}

	if (ddi_create_minor_node(dev_info, "arcmsr",
	    S_IFCHR, INST2MSR(instance), DDI_PSEUDO, 0) == DDI_FAILURE) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi_create_minor_node failed", instance);
		goto error_level_6;
	}


	/* Initialize power management bookkeeping. */
	if (pm_create_components(dev_info, 1) == DDI_SUCCESS) {
		if (pm_idle_component(dev_info, 0) == DDI_FAILURE) {
			arcmsr_log(NULL, CE_WARN,
			    "arcmsr%d: pm_idle_component failed",
			    instance);
			goto error_level_8;
		}
		pm_set_normal_power(dev_info, 0, 1);
		/* acb->power_level = 1; */
	} else {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: pm_create_components failed",
		    instance);
		goto error_level_7;
	}

	/*
	 * Since this driver manages devices with "remote" hardware,
	 * i.e. the devices themselves have no "reg" property, the SUSPEND/
	 * RESUME commands in detach/attach will not be called by the power
	 * management framework unless we request it by creating a
	 * "pm-hardware-state" property and setting it to value
	 * "needs-suspend-resume".
	 */
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dev_info,
	    "pm-hardware-state", "needs-suspend-resume")
	    != DDI_PROP_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi_prop_update(\"pm-hardware-state\") failed",
		    instance);
		goto error_level_8;
	}

	/* Create a taskq for dealing with dr events */
	if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
	    TASKQ_DEFAULTPRI, 0)) == NULL) {
		cmn_err(CE_WARN, "ddi_taskq_create failed");
		goto error_level_8;
	}

	acb->timeout_count = 0;
	/* active ccbs "timeout" watchdog */
	acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
	    (60 * drv_usectohz(1000000)));
	acb->timeout_sc_id = timeout(arcmsr_devmap_req_timeout, (caddr_t)acb,
	    (5 * drv_usectohz(1000000)));

	/* report device info */
	ddi_report_dev(dev_info);
	ArcMSRHBA[arcmsr_hba_count] = acb;
	arcmsr_hba_count++;

	return (DDI_SUCCESS);

error_level_8:
	pm_destroy_components(dev_info);

error_level_7:
	/* Remove any previously allocated minor nodes */
	ddi_remove_minor_node(dev_info, NULL);

error_level_6:
	/* undo scsi_hba_attach_setup; the tran is freed at error_level_4 */
	(void) scsi_hba_detach(dev_info);

error_level_5:
	ddi_remove_intr(dev_info, 0, (void *)acb->iblock_cookie);

error_level_4:
	scsi_hba_tran_free(hba_trans);

error_level_3:
	mutex_destroy(&acb->acb_mutex);
	mutex_destroy(&acb->postq_mutex);
	mutex_destroy(&acb->workingQ_mutex);
	mutex_destroy(&acb->ioctl_mutex);

error_level_2:
	ddi_dma_mem_free(&acb->ccbs_acc_handle);
	ddi_dma_free_handle(&acb->ccbs_pool_handle);

error_level_1:
	ddi_soft_state_free(arcmsr_soft_state, instance);

error_level_0:
	return (DDI_FAILURE);
}


/*
 *      Function: arcmsr_attach(9E)
 *   Description: Set up all device state and allocate data structures,
 *		  mutexes, condition variables, etc. for device operation.
 *		  Set mt_attr property for driver to indicate MT-safety.
 *		  Add interrupts needed.
 *         Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
 *        Output: Return DDI_SUCCESS if device is ready,
 *		  else return DDI_FAILURE
 */
static int
arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd) {

	scsi_hba_tran_t *hba_trans;
	struct ACB *acb;

#if defined(ARCMSR_DEBUG)
	arcmsr_log(NULL, CE_NOTE,
	    "arcmsr_attach called for device %lx (instance %d)",
	    &dev_info, ddi_get_instance(dev_info));
#endif
	switch (cmd) {
	case DDI_ATTACH:
		return (arcmsr_do_ddi_attach(dev_info,
		    ddi_get_instance(dev_info)));
	case DDI_RESUME:
	case DDI_PM_RESUME:
	/*
	 * There is no hardware state to restart and no timeouts to
	 * restart since we didn't PM_SUSPEND with active cmds or
	 * active timeouts.  We just need to unblock waiting threads
	 * and restart I/O.  The code for DDI_RESUME is almost identical,
	 * except that it uses the suspend flag rather than the
	 * pm_suspend flag.
	 */
	    hba_trans = (scsi_hba_tran_t *)ddi_get_driver_private(dev_info);
	    if (!hba_trans) {
		    return (DDI_FAILURE);
	    }
	    acb = (struct ACB *)
		hba_trans->tran_hba_private;
	    mutex_enter(&acb->acb_mutex);
	    arcmsr_iop_init(acb);

	    /* restart ccbs "timeout" watchdog */
	    acb->timeout_count = 0;
	    acb->timeout_id = timeout(arcmsr_ccbs_timeout,
		(caddr_t)acb, (60 * drv_usectohz(1000000)));
	    acb->timeout_sc_id = timeout(arcmsr_devmap_req_timeout,
		(caddr_t)acb, (5 * drv_usectohz(1000000)));
	    mutex_exit(&acb->acb_mutex);
	    return (DDI_SUCCESS);

	default:
	    arcmsr_log(NULL, CE_WARN,
		"arcmsr%d: ddi attach cmd (%d) unsupported",
		ddi_get_instance(dev_info), cmd);
	    return (DDI_FAILURE);
	}
}

/*
 *    Function:	arcmsr_detach(9E)
 * Description: Remove all device allocation and system resources, disable
 *		device interrupt.
 *       Input: dev_info_t *dev_info
 *		ddi_detach_cmd_t cmd
 *      Output:	Return DDI_SUCCESS if done,
 *		else return DDI_FAILURE
 */
static int
arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd) {

	int instance;
	struct ACB *acb;

	instance = ddi_get_instance(dev_info);
	acb = (struct ACB *)ddi_get_soft_state(arcmsr_soft_state,
	    instance);
	if (!acb) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		mutex_enter(&acb->acb_mutex);
		if (acb->timeout_id != 0) {
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_id = 0;
		}
		if (acb->timeout_sc_id != 0) {
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_sc_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_sc_id = 0;
		}
		arcmsr_pcidev_disattach(acb);
		/* Remove interrupt set up by ddi_add_intr */
		ddi_remove_intr(dev_info, 0, acb->iblock_cookie);
		/* unbind mapping object to handle */
		(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
		/* Free ccb pool memory */
		ddi_dma_mem_free(&acb->ccbs_acc_handle);
		/* Free DMA handle */
		ddi_dma_free_handle(&acb->ccbs_pool_handle);
		ddi_regs_map_free(&acb->reg_mu_acc_handle0);
		if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
			arcmsr_log(NULL, CE_WARN,
			    "arcmsr%d: Unable to detach instance cleanly "
			    "(should not happen)",
			    ddi_get_instance(dev_info));
		/* free scsi_hba_transport from scsi_hba_tran_alloc */
		scsi_hba_tran_free(acb->scsi_hba_transport);
		ddi_remove_minor_node(dev_info, NULL);
		ddi_taskq_destroy(acb->taskq);
		ddi_prop_remove_all(dev_info);
		mutex_exit(&acb->acb_mutex);
		mutex_destroy(&acb->acb_mutex);
		mutex_destroy(&acb->postq_mutex);
		mutex_destroy(&acb->workingQ_mutex);
		mutex_destroy(&acb->ioctl_mutex);
		pci_config_teardown(&acb->pci_acc_handle);
		ddi_set_driver_private(dev_info, NULL);
		ddi_soft_state_free(arcmsr_soft_state, instance);
		pm_destroy_components(dev_info);
		return (DDI_SUCCESS);
	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		mutex_enter(&acb->acb_mutex);
		if (acb->timeout_id != 0) {
			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_id = 0;
		}

		if (acb->timeout_sc_id != 0) {
			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_sc_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_sc_id = 0;
		}

		/* disable all outbound interrupts */
		(void) arcmsr_disable_allintr(acb);
		/* stop adapter background rebuild */
		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
			arcmsr_stop_hba_bgrb(acb);
			arcmsr_flush_hba_cache(acb);
		} else {
			arcmsr_stop_hbb_bgrb(acb);
			arcmsr_flush_hbb_cache(acb);
		}
		mutex_exit(&acb->acb_mutex);
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}



/*
 *    Function:	arcmsr_tran_tgt_init
 * Description: Called when initializing a target device instance. If
 *		no per-target initialization is required, the HBA
 *		may leave tran_tgt_init set to NULL
 *       Input:
 *		dev_info_t *host_dev_info,
 *		dev_info_t *target_dev_info,
 *		scsi_hba_tran_t *tran,
 *		struct scsi_device *sd
 *
 *      Return: DDI_SUCCESS if success, else return DDI_FAILURE
 *
 *  This entry point enables the HBA to allocate and/or initialize any
 *  per-target resources.
 *  It also enables the HBA to qualify the device's address as valid and
 *  supportable for that particular HBA.
 *  By returning DDI_FAILURE, the instance of the target driver for that
 *  device will not be probed or attached.
 *	This entry point is not required, and if none is supplied,
 *  the framework will attempt to probe and attach all possible instances
 *  of the appropriate target drivers.
 */
static int
arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
    scsi_hba_tran_t *hosttran, struct scsi_device *sd) {
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hosttran, target_dev_info))
#endif
	uint16_t  target;
	uint8_t  lun;
	struct ACB *acb = (struct ACB *)sd->sd_address.a_hba_tran->
	    tran_hba_private;

	target = sd->sd_address.a_target;
	lun = sd->sd_address.a_lun;
	if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
		cmn_err(CE_WARN,
		    "arcmsr%d: (target %d, lun %d) exceeds "
		    "maximum supported values (%d, %d)",
		    ddi_get_instance(host_dev_info),
		    target, lun, ARCMSR_MAX_TARGETID, ARCMSR_MAX_TARGETLUN);
		return (DDI_FAILURE);
	}


	if (ndi_dev_is_persistent_node(target_dev_info) == 0) {
		/*
		 * If no persistent node exists, we don't allow a .conf
		 * node to be created.
		 */
		if (arcmsr_find_child(acb, target, lun) != NULL) {
			if ((ndi_merge_node(target_dev_info,
			    arcmsr_name_node) != DDI_SUCCESS)) {
				return (DDI_SUCCESS);
			}
		}
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 *         Function: arcmsr_tran_getcap(9E)
 *      Description: Get the named capability and return its value.
 *    Return Values: current value of capability, if defined
 *		     -1 if capability is not defined
 * ------------------------------------------------------
 *         Common Capability Strings Array
 * ------------------------------------------------------
 *	#define	SCSI_CAP_DMA_MAX		0
 *	#define	SCSI_CAP_MSG_OUT		1
 *	#define	SCSI_CAP_DISCONNECT		2
 *	#define	SCSI_CAP_SYNCHRONOUS		3
 *	#define	SCSI_CAP_WIDE_XFER		4
 *	#define	SCSI_CAP_PARITY			5
 *	#define	SCSI_CAP_INITIATOR_ID		6
 *	#define	SCSI_CAP_UNTAGGED_QING		7
 *	#define	SCSI_CAP_TAGGED_QING		8
 *	#define	SCSI_CAP_ARQ			9
 *	#define	SCSI_CAP_LINKED_CMDS		10 a
 *	#define	SCSI_CAP_SECTOR_SIZE		11 b
 *	#define	SCSI_CAP_TOTAL_SECTORS		12 c
 *	#define	SCSI_CAP_GEOMETRY		13 d
 *	#define	SCSI_CAP_RESET_NOTIFICATION	14 e
 *	#define	SCSI_CAP_QFULL_RETRIES		15 f
 *	#define	SCSI_CAP_QFULL_RETRY_INTERVAL	16 10
 *	#define	SCSI_CAP_SCSI_VERSION		17 11
 *	#define	SCSI_CAP_INTERCONNECT_TYPE	18 12
 *	#define	SCSI_CAP_LUN_RESET		19 13
 */
static int
arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom) {

	int capability = 0;
	struct ACB *acb =
	    (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (cap == NULL || whom == 0) {
		return (DDI_FAILURE);
	}

	mutex_enter(&arcmsr_global_mutex);
	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		capability = acb->tgt_scsi_opts[ap->a_target];
		break;
	case SCSI_CAP_SECTOR_SIZE:
		capability = ARCMSR_DEV_SECTOR_SIZE;
		break;
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		capability = ARCMSR_MAX_XFER_LEN;
		break;
	case SCSI_CAP_INITIATOR_ID:
		capability = ARCMSR_SCSI_INITIATOR_ID;
		break;
	case SCSI_CAP_GEOMETRY:
		/* heads (upper 16 bits), sectors per track (lower 16) */
		capability = (255 << 16) | 63;
		break;
	default:
		capability = -1;
		break;
	}
	mutex_exit(&arcmsr_global_mutex);
	return (capability);
}
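
/*
 * Illustrative sketch (not part of this driver): a target driver reads
 * the SCSI_CAP_GEOMETRY value via scsi_ifgetcap(9F) and decodes heads
 * from the upper 16 bits and sectors per track from the lower 16:
 *
 *	int geom = scsi_ifgetcap(ap, "geometry", 1);
 *	int heads = (geom >> 16) & 0xffff;	// 255 for this driver
 *	int sectors = geom & 0xffff;		// 63 for this driver
 */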

/*
 *      Function: arcmsr_tran_setcap(9E)
 *   Description: Set the specified capability.
 * Return Values: 1 - capability exists and can be set to new value
 *		  0 - capability could not be set to new value
 *		 -1 - no such capability
 */
static int
arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom) {
#ifndef __lock_lint
	_NOTE(ARGUNUSED(value))
#endif

	int supported = 0;
	struct ACB *acb =
	    (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (cap == NULL || whom == 0) {
		return (-1);
	}

	mutex_enter(&arcmsr_global_mutex);
	switch (supported = scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DISCONNECT:		/* 2 */
	case SCSI_CAP_SYNCHRONOUS:		/* 3 */
	case SCSI_CAP_TAGGED_QING:		/* 8 */
	case SCSI_CAP_WIDE_XFER:		/* 4 */
	case SCSI_CAP_ARQ:			/* 9 auto request sense */
	case SCSI_CAP_TOTAL_SECTORS:		/* c */
		acb->tgt_scsi_opts[ap->a_target] |= supported;
		supported = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:		/* 7 */
	case SCSI_CAP_INITIATOR_ID:		/* 6 */
	case SCSI_CAP_DMA_MAX:			/* 0 */
	case SCSI_CAP_MSG_OUT:			/* 1 */
	case SCSI_CAP_PARITY:			/* 5 */
	case SCSI_CAP_LINKED_CMDS:		/* a */
	case SCSI_CAP_RESET_NOTIFICATION:	/* e */
	case SCSI_CAP_SECTOR_SIZE:		/* b */
		supported = 0;
		break;
	default:
		supported = -1;
		break;
	}
	mutex_exit(&arcmsr_global_mutex);
	return (supported);
}


static void
arcmsr_free_ccb(struct CCB *ccb) {

	struct ACB *acb = ccb->acb;

	ccb->startdone = ARCMSR_CCB_DONE;
	ccb->pkt = NULL;
	ccb->ccb_flags = 0;
	mutex_enter(&acb->workingQ_mutex);
	acb->ccbworkingQ[acb->workingccb_doneindex] = ccb;
	acb->workingccb_doneindex++;
	acb->workingccb_doneindex %= ARCMSR_MAX_FREECCB_NUM;
	mutex_exit(&acb->workingQ_mutex);
}
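
/*
 * Illustrative note (not from the source): ccbworkingQ is a circular free
 * list.  arcmsr_free_ccb() deposits at workingccb_doneindex and
 * arcmsr_get_freeccb() consumes from a matching get index; the modulo
 * keeps both indexes inside the array.  E.g., if ARCMSR_MAX_FREECCB_NUM
 * were 128, a doneindex of 127 would wrap to 0 on the next free:
 *
 *	(127 + 1) % 128 == 0
 */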

/*
 *      Function: arcmsr_tran_init_pkt
 * Return Values: pointer to scsi_pkt, or NULL
 *   Description: simultaneously allocate both a scsi_pkt(9S) structure and
 *                DMA resources for that pkt.
 *                Called by kernel on behalf of a target driver
 *		  calling scsi_init_pkt(9F).
 *		  Refer to tran_init_pkt(9E) man page
 *       Context: Can be called from different kernel process threads.
 *		  Can be called by interrupt thread.
 * Allocates SCSI packet and DMA resources
 */
static struct scsi_pkt *
arcmsr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg) {

	struct CCB *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct ACB *acb;
	int old_pkt_flag = 1;

	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (pkt == NULL) {
		/* get free CCB */
		ccb = arcmsr_get_freeccb(acb);
		if (ccb == NULL) {
			return (NULL);
		}

		if (ccb->pkt != NULL) {
			/*
			 * a CCB from the free list should never still own
			 * a packet; if kmem_flags are turned on, expect
			 * to see a message
			 */
			cmn_err(CE_WARN, "arcmsr%d: invalid pkt",
			    ddi_get_instance(acb->dev_info));
			return (NULL);
		}
		pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
		    statuslen, tgtlen, sizeof (struct scsi_pkt),
		    callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN,
			    "arcmsr%d: scsi pkt allocation failed",
			    ddi_get_instance(acb->dev_info));
			arcmsr_free_ccb(ccb);
			return (NULL);
		}
		/* Initialize CCB */
		ccb->pkt = pkt;
		ccb->pkt_dma_handle = NULL;
		/* record how many sg are needed to xfer on this pkt */
		ccb->pkt_ncookies = 0;
		/* record how many sg we got from this window */
		ccb->pkt_cookie = 0;
		/* record how many windows have partial dma map set */
		ccb->pkt_nwin = 0;
		/* record current sg window position */
		ccb->pkt_curwin	= 0;
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		ccb->resid_dmacookie.dmac_size = 0;

		/*
		 * keep the buf pointer around; tran_start needs it to
		 * fake up some information
		 */
		ccb->bp = bp;

		/* Initialize arcmsr_cdb */
		arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
		bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
		arcmsr_cdb->Bus = 0;
		arcmsr_cdb->Function = 1;
		arcmsr_cdb->LUN = ap->a_lun;
		arcmsr_cdb->TargetID = ap->a_target;
		arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
		arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;

		/* Fill in the rest of the structure */
		pkt->pkt_ha_private = ccb;
		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		old_pkt_flag = 0;
	} else {
		ccb = (struct CCB *)pkt->pkt_ha_private;
		/*
		 * you cannot update CdbLength with cmdlen here, it would
		 * cause a data compare error
		 */
		ccb->startdone = ARCMSR_CCB_UNBUILD;
	}

	/* Second step : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		/*
		 * The system may ask this pkt to transfer anything from a
		 * few bytes to hundreds of kilobytes.  arcmsr_dma_alloc()
		 * binds the buf and leaves pkt_dma_handle non-NULL for the
		 * life of the transfer; once bound, subsequent
		 * scsi_init_pkt(9F) calls on the same pkt land in
		 * arcmsr_dma_move(), which advances the same CCB through
		 * the remaining DMA windows via continued READ or WRITE
		 * commands until the whole buf has been transferred.
		 * After arcmsr_tran_init_pkt() returns, the kernel uses
		 * pkt_resid and b_bcount to size the data length of the
		 * SCSI CDB issued by the following arcmsr_tran_start().
		 *
		 * Each transfer should be aligned on a 512 byte boundary
		 */
		if (ccb->pkt_dma_handle == NULL) {
			if (arcmsr_dma_alloc(acb, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				/*
				 * the HBA driver is unable to allocate DMA
				 * resources, it must free the allocated
				 * scsi_pkt(9S) before returning
				 */
				cmn_err(CE_WARN, "arcmsr%d: dma allocation "
				    "failure",
				    ddi_get_instance(acb->dev_info));
				if (old_pkt_flag == 0) {
					cmn_err(CE_WARN, "arcmsr%d: dma "
					    "allocation failed; freeing "
					    "scsi hba pkt",
					    ddi_get_instance(acb->dev_info));
					arcmsr_free_ccb(ccb);
					scsi_hba_pkt_free(ap, pkt);
				}
				return (NULL);
			}
		} else {
			/* DMA resources to next DMA window, for old pkt */
			if (arcmsr_dma_move(acb, pkt, bp) == DDI_FAILURE) {
				cmn_err(CE_WARN, "arcmsr%d: dma move "
				    "failed",
				    ddi_get_instance(acb->dev_info));
				return (NULL);
			}
		}
	} else {
		pkt->pkt_resid = 0;
	}
	return (pkt);
}
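
/*
 * Illustrative sketch (not part of this driver) of the target-driver
 * side: a call like the following is what the framework routes to
 * arcmsr_tran_init_pkt() above (the names here are generic):
 *
 *	pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
 *	    SLEEP_FUNC, NULL);
 *
 * Passing pkt == NULL allocates a new packet (the first branch above);
 * re-submitting an existing pkt with the same bp advances DMA to the
 * next window via arcmsr_dma_move().
 */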

/*
 * Function name: arcmsr_dma_alloc
 * Return Values: DDI_SUCCESS if successful, DDI_FAILURE on failure
 *   Description: allocate DMA resources
 *       Context: Can only be called from arcmsr_tran_init_pkt()
 */
static int
arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)()) {

	struct CCB *ccb = pkt->pkt_ha_private;
	int alloc_result, map_method, dma_flags;
	int resid = 0;
	int total_ccb_xferlen = 0;
	int (*cb)(caddr_t);
	uint8_t i;

	/*
	 * at this point the PKT SCSI CDB is empty, and dma xfer length
	 * is bp->b_bcount
	 */

	if (bp->b_flags & B_READ) {
		ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
		dma_flags = DDI_DMA_READ;
	} else {
		ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}
	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	if ((alloc_result = ddi_dma_alloc_handle(acb->dev_info,
	    &arcmsr_dma_attr, cb, 0, &ccb->pkt_dma_handle))
	    != DDI_SUCCESS) {
		switch (alloc_result) {
		case DDI_DMA_BADATTR:
			/*
			 * If the system does not support physical DMA,
			 * the return value from ddi_dma_alloc_handle
			 * will be DDI_DMA_BADATTR
			 */
			cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
			    "'bad attribute'",
			    ddi_get_instance(acb->dev_info));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		case DDI_DMA_NORESOURCES:
			cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
			    "'no resources'",
			    ddi_get_instance(acb->dev_info));
			bioerror(bp, 0);
			return (DDI_FAILURE);
		default:
			cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
			    "'unknown failure'",
			    ddi_get_instance(acb->dev_info));
			return (DDI_FAILURE);
		}
	}

	map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle, bp,
	    dma_flags, cb, 0,
	    &ccb->pkt_dmacookies[0],	/* SG List pointer */
	    &ccb->pkt_ncookies);	/* number of sgl cookies */

	switch (map_method) {
	case DDI_DMA_PARTIAL_MAP:
		/*
		 * When main memory is larger than 4GB, the bind may come
		 * back as DDI_DMA_PARTIAL_MAP.
		 *
		 * We've already set DDI_DMA_PARTIAL in dma_flags,
		 * so if it's now missing, there's something screwy
		 * happening. We plow on....
		 */

		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			cmn_err(CE_WARN, "arcmsr%d: dma partial mapping lost "
			    "...impossible case!",
			    ddi_get_instance(acb->dev_info));
		}
		if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
		    DDI_FAILURE) {
			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_numwin() failed",
			    ddi_get_instance(acb->dev_info));
		}

		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
		    &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
		    DDI_FAILURE) {
			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_getwin failed",
			    ddi_get_instance(acb->dev_info));
		}

		i = 0;
		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
		for (;;) {
			i++;
			if (i == ARCMSR_MAX_SG_ENTRIES ||
			    i == ccb->pkt_ncookies ||
			    total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
				break;
			}
			/*
			 * next cookie will be retrieved from
			 * ccb->pkt_dmacookies[i]
			 */
			ddi_dma_nextcookie(ccb->pkt_dma_handle,
			    &ccb->pkt_dmacookies[i]);
			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
		}
		ccb->pkt_cookie = i;
		ccb->arcmsr_cdb.sgcount = i;
		if (total_ccb_xferlen > 512) {
			resid = total_ccb_xferlen % 512;
			if (resid != 0) {
				i--;
				total_ccb_xferlen -= resid;
				/* modify last sg length */
				ccb->pkt_dmacookies[i].dmac_size =
				    ccb->pkt_dmacookies[i].dmac_size - resid;
				ccb->resid_dmacookie.dmac_size = resid;
				ccb->resid_dmacookie.dmac_laddress =
				    ccb->pkt_dmacookies[i].dmac_laddress +
				    ccb->pkt_dmacookies[i].dmac_size;
			}
		}
		ccb->total_dmac_size = total_ccb_xferlen;
		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;

		return (DDI_SUCCESS);

	case DDI_DMA_MAPPED:
		ccb->pkt_nwin = 1; /* all mapped, so only one window */
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		i = 0;
		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
		for (;;) {
			i++;
			if (i == ARCMSR_MAX_SG_ENTRIES ||
			    i == ccb->pkt_ncookies ||
			    total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
				break;
			}
			/*
			 * next cookie will be retrieved from
			 * ccb->pkt_dmacookies[i]
			 */
			ddi_dma_nextcookie(ccb->pkt_dma_handle,
			    &ccb->pkt_dmacookies[i]);
			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
		}
		ccb->pkt_cookie = i;
		ccb->arcmsr_cdb.sgcount = i;
		if (total_ccb_xferlen > 512) {
			resid = total_ccb_xferlen % 512;
			if (resid != 0) {
				i--;
				total_ccb_xferlen -= resid;
				/* modify last sg length */
				ccb->pkt_dmacookies[i].dmac_size =
				    ccb->pkt_dmacookies[i].dmac_size - resid;
				ccb->resid_dmacookie.dmac_size = resid;
				ccb->resid_dmacookie.dmac_laddress =
				    ccb->pkt_dmacookies[i].dmac_laddress +
				    ccb->pkt_dmacookies[i].dmac_size;
			}
		}
		ccb->total_dmac_size = total_ccb_xferlen;
		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
		return (DDI_SUCCESS);

	case DDI_DMA_NORESOURCES:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'no resources'",
		    ddi_get_instance(acb->dev_info));
		bioerror(bp, ENOMEM);
		break;

	case DDI_DMA_NOMAPPING:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'no mapping'",
		    ddi_get_instance(acb->dev_info));
		bioerror(bp, EFAULT);
		break;

	case DDI_DMA_TOOBIG:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'too big'",
		    ddi_get_instance(acb->dev_info));
		bioerror(bp, EINVAL);
		break;

	case DDI_DMA_INUSE:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'in use' "
		    "(should not happen)",
		    ddi_get_instance(acb->dev_info));
		break;
	default:
		cmn_err(CE_WARN,
		    "arcmsr%d: dma map got 'unknown failure 0x%x' "
		    "(should not happen)",
		    ddi_get_instance(acb->dev_info), map_method);
#ifdef ARCMSR_DEBUG
		arcmsr_dump_scsi_cdb(&pkt->pkt_address, pkt);
#endif
		break;
	}

	ddi_dma_free_handle(&ccb->pkt_dma_handle);
	ccb->pkt_dma_handle = NULL;
	ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
	return (DDI_FAILURE);
}
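
/*
 * Worked example of the 512-byte trimming above (illustrative numbers):
 * if the cookies gathered for one CCB total 4,908 bytes, then
 * resid = 4908 % 512 = 300.  The last cookie is shortened by 300 bytes
 * so the CCB carries a multiple of 512 (4,608 bytes), and the trimmed
 * tail is remembered in resid_dmacookie.  arcmsr_dma_move() later
 * re-queues that tail as the first cookie of the next transfer, so no
 * data is lost; pkt_resid reports the untransferred remainder to the
 * target driver.
 */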
1613 
1614 
1615 /*
1616  * Function name: arcmsr_dma_move
1617  * Return Values: 0 if successful, -1 if failure
1618  *   Description: move DMA resources to next DMA window
1619  *       Context: Can only be called from arcmsr_tran_init_pkt()
1620  */
1621 static int
1622 arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt,
1623     struct buf *bp) {
1624 
1625 	struct CCB *ccb = pkt->pkt_ha_private;
1626 	uint8_t i = 0;
1627 	int resid = 0;
1628 	int total_ccb_xferlen = 0;
1629 
1630 	if (ccb->resid_dmacookie.dmac_size != 0) 	{
1631 		total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
1632 		ccb->pkt_dmacookies[i].dmac_size =
1633 		    ccb->resid_dmacookie.dmac_size;
1634 		ccb->pkt_dmacookies[i].dmac_laddress =
1635 		    ccb->resid_dmacookie.dmac_laddress;
1636 		i++;
1637 		ccb->resid_dmacookie.dmac_size = 0;
1638 	}
1639 	/*
1640 	 * If there are no more cookies remaining in this window,
1641 	 * move to the next window.
1642 	 */
1643 	if (ccb->pkt_cookie == ccb->pkt_ncookies) {
1644 		/*
1645 		 * only dma map "partial" arrive here
1646 		 */
1647 		if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
1648 		    (ccb->pkt_nwin == 1)) {
1649 			cmn_err(CE_CONT,
1650 			    "arcmsr%d: dma partial set, but only "
1651 			    "one window allocated",
1652 			    ddi_get_instance(acb->dev_info));
1653 			return (DDI_SUCCESS);
1654 		}
1655 
1656 		/* At last window, cannot move */
1657 		if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
1658 			cmn_err(CE_WARN,
1659 			    "arcmsr%d: dma partial set, numwin exceeded",
1660 			    ddi_get_instance(acb->dev_info));
1661 			return (DDI_FAILURE);
1662 		}
1663 		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1664 		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1665 		    &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
1666 		    DDI_FAILURE) {
1667 			cmn_err(CE_WARN,
1668 			    "arcmsr%d: dma partial set, "
1669 			    "ddi_dma_getwin failure",
1670 			    ddi_get_instance(acb->dev_info));
1671 			return (DDI_FAILURE);
1672 		}
1673 		/* reset cookie pointer */
1674 		ccb->pkt_cookie = 0;
1675 	} else {
1676 		/*
1677 		 * only dma map "all" arrive here
1678 		 * We still have more cookies in this window,
1679 		 * get the next one
1680 		 * access the pkt_dma_handle remain cookie record at
1681 		 * ccb->pkt_dmacookies array
1682 		 */
1683 		ddi_dma_nextcookie(ccb->pkt_dma_handle,
1684 		    &ccb->pkt_dmacookies[i]);
1685 	}
1686 
1687 	/* Get remaining cookies in this window, up to our maximum */
1688 	total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1689 
	/* gather the remaining cookies of this window into ccb->pkt_dmacookies */
1691 	for (;;) {
1692 		i++;
		/* pkt_cookie counts the cookies consumed from the current window */
1694 		ccb->pkt_cookie++;
1695 		if (i == ARCMSR_MAX_SG_ENTRIES ||
1696 		    ccb->pkt_cookie == ccb->pkt_ncookies ||
1697 		    total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
1698 			break;
1699 		}
1700 		ddi_dma_nextcookie(ccb->pkt_dma_handle,
1701 		    &ccb->pkt_dmacookies[i]);
1702 		total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1703 	}
1704 
1705 	ccb->arcmsr_cdb.sgcount = i;
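	/*
	 * Trim the transfer to a whole number of 512-byte sectors; e.g.
	 * total_ccb_xferlen = 4700 gives resid = 4700 % 512 = 92, so
	 * 4608 bytes are sent now and the trailing 92 bytes are saved in
	 * resid_dmacookie for the next arcmsr_dma_move() call.
	 */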
1706 	if (total_ccb_xferlen > 512) {
1707 		resid = total_ccb_xferlen % 512;
1708 		if (resid != 0) {
1709 			i--;
1710 			total_ccb_xferlen -= resid;
1711 			/* modify last sg length */
1712 			ccb->pkt_dmacookies[i].dmac_size =
1713 			    ccb->pkt_dmacookies[i].dmac_size - resid;
1714 			ccb->resid_dmacookie.dmac_size = resid;
1715 			ccb->resid_dmacookie.dmac_laddress =
1716 			    ccb->pkt_dmacookies[i].dmac_laddress +
1717 			    ccb->pkt_dmacookies[i].dmac_size;
1718 		}
1719 	}
1720 	ccb->total_dmac_size += total_ccb_xferlen;
1721 	pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1722 
1723 	return (DDI_SUCCESS);
1724 }
1725 
1726 /*
1727  * Function name: arcmsr_tran_destroy_pkt
1728  * Return Values: none
1729  *   Description: Called by kernel on behalf of a target driver
1730  *	          calling scsi_destroy_pkt(9F).
1731  *	          Refer to tran_destroy_pkt(9E) man page
1732  *       Context: Can be called from different kernel process threads.
1733  *	          Can be called by interrupt thread.
1734  */
1735 static void
1736 arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) {
1737 
1738 	struct CCB *ccb = pkt->pkt_ha_private;
1739 
1740 	if ((ccb != NULL) && (ccb->pkt == pkt)) {
1741 		struct ACB *acb = ccb->acb;
1742 		if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1743 			if (ddi_dma_unbind_handle(ccb->pkt_dma_handle)
1744 			    != DDI_SUCCESS) {
1745 				cmn_err(CE_WARN,
1746 				    "arcmsr%d: ddi_dma_unbind_handle() failed",
1747 				    ddi_get_instance(acb->dev_info));
1748 			}
1749 			ddi_dma_free_handle(&ccb->pkt_dma_handle);
1750 			ccb->pkt_dma_handle = NULL;
1751 		}
1752 		arcmsr_free_ccb(ccb);
1753 	}
1754 
1755 	scsi_hba_pkt_free(ap, pkt);
1756 }
1757 
1758 /*
1759  * Function name: arcmsr_tran_dmafree()
1760  * Return Values: none
1761  *   Description: free dvma resources
1762  *       Context: Can be called from different kernel process threads.
1763  *	          Can be called by interrupt thread.
1764  */
1765 static void
1766 arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) {
1767 
1768 	struct CCB *ccb = pkt->pkt_ha_private;
1769 
1770 	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1771 		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1772 		if (ddi_dma_unbind_handle(ccb->pkt_dma_handle)
1773 		    != DDI_SUCCESS) {
1774 			cmn_err(CE_WARN,
1775 			    "arcmsr%d: ddi_dma_unbind_handle() failed "
1776 			    "(target %d lun %d)",
1777 			    ddi_get_instance(ccb->acb->dev_info),
1778 			    ap->a_target, ap->a_lun);
1779 		}
1780 		ddi_dma_free_handle(&ccb->pkt_dma_handle);
1781 		ccb->pkt_dma_handle = NULL;
1782 	}
1783 }
1784 
1785 /*
1786  * Function name: arcmsr_tran_sync_pkt()
1787  * Return Values: none
1788  *   Description: sync dma
1789  *       Context: Can be called from different kernel process threads.
1790  *		  Can be called by interrupt thread.
1791  */
1792 static void
1793 arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) {
1794 
1795 	struct CCB *ccb;
1796 
1797 	ccb = pkt->pkt_ha_private;
1798 
1799 	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1800 		if (ddi_dma_sync(ccb->pkt_dma_handle,
1801 		    ccb->pkt_dma_offset, ccb->pkt_dma_len,
1802 		    (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
1803 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU)
1804 			!= DDI_SUCCESS) {
1805 			cmn_err(CE_WARN, "arcmsr%d: sync pkt failed "
1806 			    "for target %d lun %d",
1807 			    ddi_get_instance(ccb->acb->dev_info),
1808 			    ap->a_target, ap->a_lun);
1809 		}
1810 	}
1811 }
1812 
1813 
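/*
 * Poll for the IOP to acknowledge a message: the caller first writes a
 * message code to inbound_msgaddr0, then this routine spins on
 * outbound_intstatus for ARCMSR_MU_OUTBOUND_MESSAGE0_INT and acks it by
 * writing the bit back, giving up after roughly 20 seconds
 * (20 retries x 100 polls x 10 ms).
 */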
1814 static uint8_t
1815 arcmsr_hba_wait_msgint_ready(struct ACB *acb) {
1816 
1817 	uint32_t i;
1818 	uint8_t retries = 0x00;
1819 	struct HBA_msgUnit *phbamu;
1820 
1821 
1822 	phbamu = (struct HBA_msgUnit *)acb->pmu;
1823 
1824 	do {
1825 		for (i = 0; i < 100; i++) {
1826 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
1827 			    &phbamu->outbound_intstatus) &
1828 			    ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1829 				/* clear interrupt */
1830 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1831 				    &phbamu->outbound_intstatus,
1832 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1833 				return (TRUE);
1834 			}
1835 			drv_usecwait(10000);
1836 			if (ddi_in_panic()) {
1837 				/* clear interrupts */
1838 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1839 				    &phbamu->outbound_intstatus,
1840 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1841 				return (TRUE);
1842 			}
1843 		} /* max 1 second */
1844 	} while (retries++ < 20); /* max 20 seconds */
1845 	return (FALSE);
1846 }
1847 
1848 
1849 
1850 static uint8_t
1851 arcmsr_hbb_wait_msgint_ready(struct ACB *acb) {
1852 
1853 	struct HBB_msgUnit *phbbmu;
1854 	uint32_t i;
1855 	uint8_t retries = 0x00;
1856 
1857 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
1858 
1859 	do {
1860 		for (i = 0; i < 100; i++) {
1861 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
1862 			    &phbbmu->hbb_doorbell->iop2drv_doorbell) &
1863 			    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1864 				/* clear interrupt */
1865 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1866 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
1867 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1868 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1869 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1870 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1871 				return (TRUE);
1872 			}
1873 			drv_usecwait(10000);
1874 			if (ddi_in_panic()) {
1875 				/* clear interrupts */
1876 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1877 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
1878 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1879 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1880 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1881 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1882 				return (TRUE);
1883 			}
1884 		} /* max 1 second */
1885 	} while (retries++ < 20); /* max 20 seconds */
1886 
1887 	return (FALSE);
1888 }
1889 
1890 
1891 static void
1892 arcmsr_flush_hba_cache(struct ACB *acb) {
1893 
1894 	struct HBA_msgUnit *phbamu;
1895 	int retry_count = 30;
1896 
	/* allow the cache flush up to 10 minutes: 30 retries x 20 seconds */
1898 
1899 	phbamu = (struct HBA_msgUnit *)acb->pmu;
1900 
1901 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
1902 	    ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
1903 
1904 	do {
1905 		if (arcmsr_hba_wait_msgint_ready(acb)) {
1906 			break;
1907 		} else {
1908 			retry_count--;
1909 		}
1910 	} while (retry_count != 0);
1911 }
1912 
1913 
1914 
1915 static void
1916 arcmsr_flush_hbb_cache(struct ACB *acb) {
1917 
1918 	struct HBB_msgUnit *phbbmu;
1919 	int retry_count = 30;
1920 
	/* allow the cache flush up to 10 minutes: 30 retries x 20 seconds */
1922 
1923 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
1924 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1925 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1926 	    ARCMSR_MESSAGE_FLUSH_CACHE);
1927 
1928 	do {
1929 		if (arcmsr_hbb_wait_msgint_ready(acb)) {
1930 			break;
1931 		} else {
1932 			retry_count--;
1933 		}
1934 	} while (retry_count != 0);
1935 }
1936 
1937 
1938 static void
1939 arcmsr_ccb_complete(struct CCB *ccb, int flag) {
1940 
1941 	struct ACB *acb = ccb->acb;
1942 	struct scsi_pkt *pkt = ccb->pkt;
1943 
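	/* flag == 1 means this CCB was counted as outstanding on the IOP */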
1944 	if (flag == 1) {
1945 		atomic_add_32((volatile uint32_t *)
1946 		    &acb->ccboutstandingcount, -1);
1947 	}
1948 	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1949 	    STATE_SENT_CMD | STATE_GOT_STATUS);
1950 
1951 	if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1952 	    (pkt->pkt_state & STATE_XFERRED_DATA)) {
1953 		(void) ddi_dma_sync(ccb->pkt_dma_handle,
1954 		    ccb->pkt_dma_offset, ccb->pkt_dma_len,
1955 		    DDI_DMA_SYNC_FORCPU);
1956 	}
1957 
1958 	if (pkt->pkt_comp) {
1959 		(*pkt->pkt_comp)(pkt);
1960 	}
1961 }
1962 
1963 
1964 static void
1965 arcmsr_report_sense_info(struct CCB *ccb) {
1966 
1967 	struct scsi_pkt *pkt = ccb->pkt;
1968 	struct scsi_arq_status *arq_status;
1969 
1970 
1971 	arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
1972 	bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
1973 	arq_status->sts_rqpkt_reason = CMD_CMPLT;
1974 	arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
1975 	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
1976 	arq_status->sts_rqpkt_statistics = pkt->pkt_statistics;
1977 	arq_status->sts_rqpkt_resid = 0;
1978 
1979 	pkt->pkt_reason = CMD_CMPLT;
1980 	/* auto rqsense took place */
1981 	pkt->pkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
1982 	    STATE_GOT_STATUS | STATE_ARQ_DONE);
1983 
	/*
	 * sts_sensedata is embedded in the arq status, so its address can
	 * never be NULL; copy the IOP sense data unconditionally.
	 */
	{
		struct SENSE_DATA *cdb_sensedata;
		struct scsi_extended_sense *sts_sensedata;
1987 
1988 		cdb_sensedata =
1989 		    (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
1990 		sts_sensedata = &arq_status->sts_sensedata;
1991 
1992 		sts_sensedata->es_code = cdb_sensedata->ErrorCode;
1993 		/* must eq CLASS_EXTENDED_SENSE (0x07) */
1994 		sts_sensedata->es_class = cdb_sensedata->ErrorClass;
1995 		sts_sensedata->es_valid = cdb_sensedata->Valid;
1996 		sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
1997 		sts_sensedata->es_key = cdb_sensedata->SenseKey;
1998 		sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
1999 		sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
2000 		sts_sensedata->es_filmk = cdb_sensedata->FileMark;
2001 		sts_sensedata->es_info_1 = cdb_sensedata->Information[0];
2002 		sts_sensedata->es_info_2 = cdb_sensedata->Information[1];
2003 		sts_sensedata->es_info_3 = cdb_sensedata->Information[2];
2004 		sts_sensedata->es_info_4 = cdb_sensedata->Information[3];
2005 		sts_sensedata->es_add_len =
2006 		    cdb_sensedata->AdditionalSenseLength;
2007 		sts_sensedata->es_cmd_info[0] =
2008 		    cdb_sensedata->CommandSpecificInformation[0];
2009 		sts_sensedata->es_cmd_info[1] =
2010 		    cdb_sensedata->CommandSpecificInformation[1];
2011 		sts_sensedata->es_cmd_info[2] =
2012 		    cdb_sensedata->CommandSpecificInformation[2];
2013 		sts_sensedata->es_cmd_info[3] =
2014 		    cdb_sensedata->CommandSpecificInformation[3];
2015 		sts_sensedata->es_add_code =
2016 		    cdb_sensedata->AdditionalSenseCode;
2017 		sts_sensedata->es_qual_code =
2018 		    cdb_sensedata->AdditionalSenseCodeQualifier;
2019 		sts_sensedata->es_fru_code =
2020 		    cdb_sensedata->FieldReplaceableUnitCode;
2021 	}
2022 }
2023 
2024 
2025 
2026 static void
2027 arcmsr_abort_hba_allcmd(struct ACB *acb) {
2028 
2029 	struct HBA_msgUnit *phbamu;
2030 
2031 	phbamu = (struct HBA_msgUnit *)acb->pmu;
2032 
2033 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2034 	    &phbamu->inbound_msgaddr0,
2035 	    ARCMSR_INBOUND_MESG0_ABORT_CMD);
2036 
2037 	if (!arcmsr_hba_wait_msgint_ready(acb)) {
2038 		cmn_err(CE_WARN,
2039 		    "arcmsr%d: timeout while waiting for 'abort all "
2040 		    "outstanding commands'",
2041 		    ddi_get_instance(acb->dev_info));
2042 	}
2043 }
2044 
2045 
2046 
2047 static void
2048 arcmsr_abort_hbb_allcmd(struct ACB *acb) {
2049 
2050 	struct HBB_msgUnit *phbbmu =
2051 	    (struct HBB_msgUnit *)acb->pmu;
2052 
2053 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2054 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2055 	    ARCMSR_MESSAGE_ABORT_CMD);
2056 
2057 	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2058 		cmn_err(CE_WARN,
2059 		    "arcmsr%d: timeout while waiting for 'abort all "
2060 		    "outstanding commands'",
2061 		    ddi_get_instance(acb->dev_info));
2062 	}
2063 }
2064 
2065 static void
2066 arcmsr_report_ccb_state(struct ACB *acb,
2067     struct CCB *ccb, uint32_t flag_ccb) {
2068 
2069 	int id, lun;
2070 
2071 	id = ccb->pkt->pkt_address.a_target;
2072 	lun = ccb->pkt->pkt_address.a_lun;
2073 
2074 	if ((flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR) == 0) {
2075 		if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
2076 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
2077 		}
2078 		ccb->pkt->pkt_reason = CMD_CMPLT;
2079 		ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
2080 		arcmsr_ccb_complete(ccb, 1);
2081 	} else {
2082 		switch (ccb->arcmsr_cdb.DeviceStatus) {
2083 		case ARCMSR_DEV_SELECT_TIMEOUT:
2084 			if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
2085 				cmn_err(CE_CONT,
2086 				    "arcmsr%d: raid volume was kicked out ",
2087 				    ddi_get_instance(acb->dev_info));
2088 			}
2089 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2090 			ccb->pkt->pkt_reason = CMD_TIMEOUT;
2091 			ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
2092 			arcmsr_ccb_complete(ccb, 1);
2093 			break;
2094 		case ARCMSR_DEV_ABORTED:
2095 		case ARCMSR_DEV_INIT_FAIL:
2096 			cmn_err(CE_CONT,
2097 			    "arcmsr%d: isr got "
2098 			    "'ARCMSR_DEV_ABORTED' 'ARCMSR_DEV_INIT_FAIL'",
2099 			    ddi_get_instance(acb->dev_info));
2100 			cmn_err(CE_CONT, "arcmsr%d: raid volume was kicked "
2101 			    "out", ddi_get_instance(acb->dev_info));
2102 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2103 			ccb->pkt->pkt_reason = CMD_DEV_GONE;
2104 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2105 			arcmsr_ccb_complete(ccb, 1);
2106 			break;
2107 		case SCSISTAT_CHECK_CONDITION:
2108 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
2109 			arcmsr_report_sense_info(ccb);
2110 			arcmsr_ccb_complete(ccb, 1);
2111 			break;
2112 		default:
2113 			cmn_err(CE_WARN, "arcmsr%d: target %d lun %d "
2114 			    "isr received CMD_DONE with unknown "
2115 			    "DeviceStatus (0x%x)",
2116 			    ddi_get_instance(acb->dev_info), id, lun,
2117 			    ccb->arcmsr_cdb.DeviceStatus);
2118 			cmn_err(CE_CONT, "arcmsr%d: raid volume was kicked "
2119 			    "out ", ddi_get_instance(acb->dev_info));
2120 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2121 			/* unknown error or crc error just for retry */
2122 			ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2123 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2124 			arcmsr_ccb_complete(ccb, 1);
2125 			break;
2126 		}
2127 	}
2128 }
2129 
2130 
2131 static void
2132 arcmsr_drain_donequeue(struct ACB *acb, uint32_t flag_ccb) {
2133 
2134 	struct CCB *ccb;
2135 
	/*
	 * flag_ccb holds the CCB frame's physical address shifted right
	 * by 5 (frames are aligned on a 32-byte boundary); convert it
	 * back to the driver's virtual address.
	 */
	ccb = (struct CCB *)(acb->vir2phy_offset + (flag_ccb << 5));
2139 
	if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2141 		if (ccb->startdone == ARCMSR_CCB_ABORTED) {
2142 			cmn_err(CE_CONT,
2143 			    "arcmsr%d: isr got aborted command "
2144 			    "while draining doneq",
2145 			    ddi_get_instance(acb->dev_info));
2146 			ccb->pkt->pkt_reason = CMD_ABORTED;
2147 			ccb->pkt->pkt_statistics |= STAT_ABORTED;
2148 			arcmsr_ccb_complete(ccb, 1);
2149 			return;
2150 		}
2151 
2152 		if (ccb->startdone == ARCMSR_CCB_RESET) {
2153 			cmn_err(CE_CONT,
2154 			    "arcmsr%d: isr got command reset "
2155 			    "while draining doneq",
2156 			    ddi_get_instance(acb->dev_info));
2157 			ccb->pkt->pkt_reason = CMD_RESET;
2158 			ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
2159 			arcmsr_ccb_complete(ccb, 1);
2160 			return;
2161 		}
2162 
2163 		cmn_err(CE_WARN, "arcmsr%d: isr got an illegal ccb command "
2164 		    "done while draining doneq",
2165 		    ddi_get_instance(acb->dev_info));
2166 		return;
2167 	}
2168 	arcmsr_report_ccb_state(acb, ccb, flag_ccb);
2169 }
2170 
2171 
2172 static void
2173 arcmsr_done4abort_postqueue(struct ACB *acb) {
2174 
2175 	int i = 0;
2176 	uint32_t flag_ccb;
2177 
2178 	switch (acb->adapter_type) {
2179 	case ACB_ADAPTER_TYPE_A:
2180 	{
2181 		struct HBA_msgUnit *phbamu;
2182 		uint32_t outbound_intstatus;
2183 
2184 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2185 		/* clear and abort all outbound posted Q */
2186 		outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
2187 		    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
2188 		/* clear interrupt */
2189 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2190 		    &phbamu->outbound_intstatus, outbound_intstatus);
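		/* the outbound queue port reads 0xFFFFFFFF when empty */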
2191 		while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
2192 		    &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
2193 		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
2194 			arcmsr_drain_donequeue(acb, flag_ccb);
2195 		}
2196 	}
2197 		break;
2198 
2199 	case ACB_ADAPTER_TYPE_B:
2200 	{
2201 		struct HBB_msgUnit *phbbmu;
2202 
2203 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2204 
2205 		/* clear all outbound posted Q */
2206 		/* clear doorbell interrupt */
2207 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2208 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
2209 		    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
2210 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
2211 			if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
2212 				phbbmu->done_qbuffer[i] = 0;
2213 				arcmsr_drain_donequeue(acb, flag_ccb);
2214 			}
2215 			phbbmu->post_qbuffer[i] = 0;
2216 		}	/* drain reply FIFO */
2217 		phbbmu->doneq_index = 0;
2218 		phbbmu->postq_index = 0;
2219 		break;
2220 	}
2221 	}
2222 }
2223 
/*
 * Function name: arcmsr_iop_reset
 * Return Values: none
 *   Description: reset the 80331 IOP: abort all outstanding commands,
 *		  drain the post queue and complete the aborted CCBs
 *		  with CMD_RESET.
 */
2229 static void
2230 arcmsr_iop_reset(struct ACB *acb) {
2231 
2232 	struct CCB *ccb;
2233 	uint32_t intmask_org;
2234 	int i = 0;
2235 
2236 	if (acb->ccboutstandingcount > 0) {
2237 		/* disable all outbound interrupt */
2238 		intmask_org = arcmsr_disable_allintr(acb);
		/* tell the IOP 331 to abort the outstanding commands */
2240 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
2241 			arcmsr_abort_hba_allcmd(acb);
2242 		} else {
2243 			arcmsr_abort_hbb_allcmd(acb);
2244 		}
2245 		/* clear and abort all outbound posted Q */
2246 		arcmsr_done4abort_postqueue(acb);
2247 
2248 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2249 			ccb = acb->pccb_pool[i];
2250 			if (ccb->startdone == ARCMSR_CCB_START) {
2251 				ccb->startdone = ARCMSR_CCB_RESET;
2252 				ccb->pkt->pkt_reason = CMD_RESET;
2253 				ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
2254 				arcmsr_ccb_complete(ccb, 1);
2255 			}
2256 		}
2257 		/* enable all outbound interrupt */
2258 		arcmsr_enable_allintr(acb, intmask_org);
2259 	}
2260 }
2261 
2262 /*
2263  * You can access the DMA address through the #defines:
2264  * dmac_address for 32-bit addresses and dmac_laddress for 64-bit addresses.
2265  *	These macros are defined as follows:
2266  *
2267  *	#define dmac_laddress   _dmu._dmac_ll
2268  *	#ifdef _LONG_LONG_HTOL
2269  *		#define dmac_notused    _dmu._dmac_la[0]
2270  *		#define dmac_address    _dmu._dmac_la[1]
2271  *	#else
2272  *		#define dmac_address    _dmu._dmac_la[0]
2273  *		#define dmac_notused    _dmu._dmac_la[1]
2274  *	#endif
2275  */
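/*
 * A minimal sketch, under the assumption (they are defined in arcmsr.h)
 * that the dma_addr_lo32()/dma_addr_hi32() macros used below simply
 * split a 64-bit cookie address into its two halves:
 *
 *	#define	dma_addr_lo32(addr)	((uint32_t)(addr))
 *	#define	dma_addr_hi32(addr)	((uint32_t)((uint64_t)(addr) >> 32))
 */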
2276 /*ARGSUSED*/
2277 static void
2278 arcmsr_build_ccb(struct CCB *ccb) {
2279 
2280 	struct scsi_pkt *pkt = ccb->pkt;
2281 	struct ARCMSR_CDB *arcmsr_cdb;
2282 	char *psge;
2283 	uint32_t address_lo, address_hi;
2284 	int arccdbsize = 0x30;
2285 	uint8_t sgcount;
2286 
2287 	arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
2288 	psge = (char *)&arcmsr_cdb->sgu;
2289 
	/* command deadline: current time plus the packet timeout, in seconds */
2291 	ccb->ccb_time = (time_t)(pkt->pkt_time + ddi_get_time());
2292 	bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb,
2293 	    arcmsr_cdb->CdbLength);
2294 	sgcount = ccb->arcmsr_cdb.sgcount;
2295 
2296 	if (sgcount) {
2297 		int length, i;
2298 		int cdb_sgcount = 0;
2299 		int total_xfer_length = 0;
2300 
2301 		/* map stor port SG list to our iop SG List. */
2302 		for (i = 0; i < sgcount; i++) {
2303 			/* Get physaddr of the current data pointer */
2304 			length = ccb->pkt_dmacookies[i].dmac_size;
2305 			total_xfer_length += length;
			address_lo = dma_addr_lo32(
			    ccb->pkt_dmacookies[i].dmac_laddress);
			address_hi = dma_addr_hi32(
			    ccb->pkt_dmacookies[i].dmac_laddress);
2310 
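			/*
			 * SG entries must not cross a 4GB boundary: for
			 * example, a 64KB segment at 0xFFFFF000 becomes
			 * a 4KB entry ending at 4GB plus a 60KB entry
			 * starting at 0x100000000, handled by the
			 * SG64ENTRY loop below.
			 */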
2311 			if (address_hi == 0) {
2312 				struct SG32ENTRY *dma_sg;
2313 
2314 				dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
2315 
2316 				dma_sg->address = address_lo;
2317 				dma_sg->length = length;
2318 				psge += sizeof (struct SG32ENTRY);
2319 				arccdbsize += sizeof (struct SG32ENTRY);
2320 			} else {
2321 				int sg64s_size = 0;
2322 				int tmplength = length;
2323 				int64_t span4G, length0;
2324 				struct SG64ENTRY *dma_sg;
2325 
2326 				/*LINTED*/
2327 				while (1) {
2328 					dma_sg =
2329 					    (struct SG64ENTRY *)(intptr_t)psge;
2330 					span4G =
2331 					    (int64_t)address_lo + tmplength;
2332 
2333 					dma_sg->addresshigh = address_hi;
2334 					dma_sg->address = address_lo;
2335 					if (span4G > 0x100000000ULL) {
2336 						/* see if we cross 4G */
2337 						length0 = 0x100000000ULL -
2338 						    address_lo;
2339 						dma_sg->length =
2340 						    (uint32_t)length0 |
2341 						    IS_SG64_ADDR;
2342 						address_hi = address_hi + 1;
2343 						address_lo = 0;
2344 						tmplength = tmplength-
2345 						    (int32_t)length0;
2346 						sg64s_size +=
2347 						    sizeof (struct SG64ENTRY);
2348 						psge +=
2349 						    sizeof (struct SG64ENTRY);
2350 						cdb_sgcount++;
2351 					} else {
2352 						dma_sg->length = tmplength |
2353 						    IS_SG64_ADDR;
2354 						sg64s_size +=
2355 						    sizeof (struct SG64ENTRY);
2356 						psge +=
2357 						    sizeof (struct SG64ENTRY);
2358 						break;
2359 					}
2360 				}
2361 				arccdbsize += sg64s_size;
2362 			}
2363 			cdb_sgcount++;
2364 		}
2365 		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
2366 		arcmsr_cdb->DataLength = total_xfer_length;
2367 		if (arccdbsize > 256) {
2368 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
2369 		}
2370 	} else {
2371 		arcmsr_cdb->DataLength = 0;
2372 	}
2373 
2374 	if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
2375 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
2376 }
2377 
/*
 * arcmsr_post_ccb - post an ARC "send postcard" to an AIOC.
 *
 * acb:	adapter control block of the target adapter
 * ccb:	command control block holding the prepared ARCMSR_CDB
 *
 * This routine posts the CCB's shifted physical address to the request
 * post FIFO of the given ARC adapter.
 */
2388 static int
2389 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb) {
2390 
2391 	uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
2392 	struct scsi_pkt *pkt = ccb->pkt;
2393 	struct ARCMSR_CDB *arcmsr_cdb;
2394 
2395 	arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
2396 
2397 	/* Use correct offset and size for syncing */
2398 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
2399 	    DDI_DMA_SYNC_FORDEV) == DDI_FAILURE)
2400 		return (DDI_FAILURE);
2401 
2402 	atomic_add_32((volatile uint32_t *)&acb->ccboutstandingcount, 1);
2403 	ccb->startdone = ARCMSR_CCB_START;
2404 
2405 	switch (acb->adapter_type) {
2406 	case ACB_ADAPTER_TYPE_A:
2407 	{
2408 		struct HBA_msgUnit *phbamu;
2409 
2410 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2411 
2412 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
2413 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2414 			    &phbamu->inbound_queueport,
2415 			    cdb_shifted_phyaddr |
2416 			    ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
2417 		} else {
2418 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2419 			    &phbamu->inbound_queueport, cdb_shifted_phyaddr);
2420 		}
2421 		if (pkt->pkt_flags & FLAG_NOINTR)
2422 			arcmsr_polling_hba_ccbdone(acb, ccb);
2423 	}
2424 		break;
2425 	case ACB_ADAPTER_TYPE_B:
2426 	{
2427 		struct HBB_msgUnit *phbbmu;
2428 		int ending_index, index;
2429 
2430 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2431 		mutex_enter(&acb->postq_mutex);
2432 		index = phbbmu->postq_index;
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
2434 		phbbmu->post_qbuffer[ending_index] = 0;
2435 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
2436 			phbbmu->post_qbuffer[index] =
2437 			    (cdb_shifted_phyaddr|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
2438 		} else {
2439 			phbbmu->post_qbuffer[index] = cdb_shifted_phyaddr;
2440 		}
2441 		index++;
		/* wrap the index to 0 past the last slot */
2443 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
2444 		phbbmu->postq_index = index;
2445 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2446 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2447 		    ARCMSR_DRV2IOP_CDB_POSTED);
2448 		mutex_exit(&acb->postq_mutex);
2449 		if (pkt->pkt_flags & FLAG_NOINTR)
2450 			arcmsr_polling_hbb_ccbdone(acb, ccb);
2451 	}
2452 	break;
2453 	}
2454 
2455 	return (DDI_SUCCESS);
2456 }
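/*
 * Posting differs by adapter type: Type A writes the shifted CCB address
 * directly to a hardware queue port, while Type B appends it to a
 * shared-memory circular post queue and rings a doorbell, which is why
 * only the Type B path needs postq_mutex.
 */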
2457 
2458 
2459 
2460 
2461 static struct QBUFFER *
2462 arcmsr_get_iop_rqbuffer(struct ACB *acb) {
2463 
2464 	struct QBUFFER *qb;
2465 
2466 	switch (acb->adapter_type) {
2467 	case ACB_ADAPTER_TYPE_A:
2468 	{
2469 		struct HBA_msgUnit *phbamu;
2470 
2471 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2472 		qb = (struct QBUFFER *)&phbamu->message_rbuffer;
2473 	}
2474 		break;
2475 	case ACB_ADAPTER_TYPE_B:
2476 	{
2477 		struct HBB_msgUnit *phbbmu;
2478 
2479 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2480 		qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
2481 	}
2482 		break;
2483 	}
2484 
2485 	return (qb);
2486 }
2487 
2488 
2489 
2490 static struct QBUFFER *
2491 arcmsr_get_iop_wqbuffer(struct ACB *acb) {
2492 
2493 	struct QBUFFER *qbuffer = NULL;
2494 
2495 	switch (acb->adapter_type) {
2496 	case ACB_ADAPTER_TYPE_A:
2497 	{
2498 		struct HBA_msgUnit *phbamu;
2499 
2500 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2501 		qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
2502 	}
2503 	break;
2504 	case ACB_ADAPTER_TYPE_B:
2505 	{
2506 		struct HBB_msgUnit *phbbmu;
2507 
2508 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2509 		qbuffer =
2510 		    (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
2511 	}
2512 	break;
2513 	}
2514 	return (qbuffer);
2515 }
2516 
2517 
2518 
2519 static void
2520 arcmsr_iop_message_read(struct ACB *acb) {
2521 
2522 	switch (acb->adapter_type) {
2523 	case ACB_ADAPTER_TYPE_A:
2524 	{
2525 		struct HBA_msgUnit *phbamu;
2526 
2527 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2528 		/* let IOP know the data has been read */
2529 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2530 		    &phbamu->inbound_doorbell,
2531 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
2532 	}
2533 	break;
2534 	case ACB_ADAPTER_TYPE_B:
2535 	{
2536 		struct HBB_msgUnit *phbbmu;
2537 
2538 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2539 		/* let IOP know the data has been read */
2540 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2541 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2542 		    ARCMSR_DRV2IOP_DATA_READ_OK);
2543 	}
2544 	break;
2545 	}
2546 }
2547 
2548 
2549 
2550 static void
2551 arcmsr_iop_message_wrote(struct ACB *acb) {
2552 
2553 	switch (acb->adapter_type) {
2554 	case ACB_ADAPTER_TYPE_A:
2555 	{
2556 		struct HBA_msgUnit *phbamu;
2557 
2558 		phbamu = (struct HBA_msgUnit *)acb->pmu;
		/*
		 * Ring the inbound doorbell to tell the IOP that the
		 * driver data was written, then wait for the reply at the
		 * next hardware interrupt for the next Qbuffer post.
		 */
2563 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2564 		    &phbamu->inbound_doorbell,
2565 		    ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
2566 	}
2567 	break;
2568 	case ACB_ADAPTER_TYPE_B:
2569 	{
2570 		struct HBB_msgUnit *phbbmu;
2571 
2572 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
		/*
		 * Ring the inbound doorbell to tell the IOP that the
		 * driver data was written successfully, then await the
		 * reply at the next hardware interrupt for the next
		 * Qbuffer post.
		 */
2578 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2579 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2580 		    ARCMSR_DRV2IOP_DATA_WRITE_OK);
2581 	}
2582 	break;
2583 	}
2584 }
2585 
2586 
2587 
2588 static void
2589 arcmsr_post_ioctldata2iop(struct ACB *acb) {
2590 
2591 	uint8_t *pQbuffer;
2592 	struct QBUFFER *pwbuffer;
2593 	uint8_t *iop_data;
2594 	int32_t allxfer_len = 0;
2595 
2596 	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2597 	iop_data = (uint8_t *)pwbuffer->data;
2598 	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
2599 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
2600 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
2601 		    (allxfer_len < 124)) {
2602 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
2603 			(void) memcpy(iop_data, pQbuffer, 1);
2604 			acb->wqbuf_firstidx++;
			/* wrap the index to 0 past the last slot */
2606 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
2607 			iop_data++;
2608 			allxfer_len++;
2609 		}
2610 		pwbuffer->data_len = allxfer_len;
2611 		/*
2612 		 * push inbound doorbell and wait reply at hwinterrupt
2613 		 * routine for next Qbuffer post
2614 		 */
2615 		arcmsr_iop_message_wrote(acb);
2616 	}
2617 }
2618 
2619 
2620 
2621 static void
2622 arcmsr_stop_hba_bgrb(struct ACB *acb) {
2623 
2624 	struct HBA_msgUnit *phbamu;
2625 
2626 	phbamu = (struct HBA_msgUnit *)acb->pmu;
2627 
2628 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2629 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2630 	    &phbamu->inbound_msgaddr0,
2631 	    ARCMSR_INBOUND_MESG0_STOP_BGRB);
2632 	if (!arcmsr_hba_wait_msgint_ready(acb))
2633 		cmn_err(CE_WARN,
2634 		    "arcmsr%d: timeout while waiting for background "
2635 		    "rebuild completion",
2636 		    ddi_get_instance(acb->dev_info));
2637 }
2638 
2639 
2640 static void
2641 arcmsr_stop_hbb_bgrb(struct ACB *acb) {
2642 
2643 	struct HBB_msgUnit *phbbmu;
2644 
2645 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
2646 
2647 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2648 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2649 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2650 	    ARCMSR_MESSAGE_STOP_BGRB);
2651 
2652 	if (!arcmsr_hbb_wait_msgint_ready(acb))
2653 		cmn_err(CE_WARN,
2654 		    "arcmsr%d: timeout while waiting for background "
2655 		    "rebuild completion",
2656 		    ddi_get_instance(acb->dev_info));
2657 }
2658 
2659 static int
2660 arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt) {
2661 
2662 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2663 	struct CCB *ccb = pkt->pkt_ha_private;
2664 	struct buf *bp = ccb->bp;
2665 	uint8_t *pQbuffer;
2666 	int retvalue = 0, transfer_len = 0;
2667 	char *buffer;
2668 	uint32_t controlcode;
2669 
2670 
	/* Areca I/O control code: 4 bytes, big-endian, in CDB bytes 5-8 */
2672 	controlcode = (uint32_t)pkt->pkt_cdbp[5] << 24 |
2673 	    (uint32_t)pkt->pkt_cdbp[6] << 16 |
2674 	    (uint32_t)pkt->pkt_cdbp[7] << 8 |
2675 	    (uint32_t)pkt->pkt_cdbp[8];
2676 
2677 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
2678 		bp_mapin(bp);
2679 
2680 
2681 	buffer = bp->b_un.b_addr;
2682 	transfer_len = bp->b_bcount;
2683 	if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
2684 		retvalue = ARCMSR_MESSAGE_FAIL;
2685 		goto message_out;
2686 	}
2687 
2688 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
2689 
2690 	switch (controlcode) {
2691 	case ARCMSR_MESSAGE_READ_RQBUFFER:
2692 	{
2693 		unsigned long *ver_addr;
2694 		uint8_t *ptmpQbuffer;
2695 		int32_t allxfer_len = 0;
2696 
		/* KM_SLEEP allocations never fail, so no NULL check is needed */
		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2702 
2703 		ptmpQbuffer = (uint8_t *)ver_addr;
2704 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
2705 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
2706 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
2707 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
2708 			acb->rqbuf_firstidx++;
2709 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
2710 			ptmpQbuffer++;
2711 			allxfer_len++;
2712 		}
2713 
2714 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2715 			struct QBUFFER *prbuffer;
2716 			uint8_t  *iop_data;
2717 			int32_t iop_len;
2718 
2719 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2720 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
2721 			iop_data = (uint8_t *)prbuffer->data;
2722 			iop_len = (int32_t)prbuffer->data_len;
2723 
2724 			while (iop_len > 0) {
2725 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
2726 				(void) memcpy(pQbuffer, iop_data, 1);
2727 				acb->rqbuf_lastidx++;
2728 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
2729 				iop_data++;
2730 				iop_len--;
2731 			}
2732 			arcmsr_iop_message_read(acb);
2733 		}
2734 
2735 		(void) memcpy(pcmdmessagefld->messagedatabuffer,
2736 		    (uint8_t *)ver_addr, allxfer_len);
2737 		pcmdmessagefld->cmdmessage.Length = allxfer_len;
2738 		pcmdmessagefld->cmdmessage.ReturnCode =
2739 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2740 		kmem_free(ver_addr, MSGDATABUFLEN);
2741 	}
2742 	break;
2743 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
2744 	{
2745 		unsigned long *ver_addr;
2746 		int32_t my_empty_len, user_len, wqbuf_firstidx, wqbuf_lastidx;
2747 		uint8_t *ptmpuserbuffer;
2748 
		/* KM_SLEEP allocations never fail, so no NULL check is needed */
		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2754 		ptmpuserbuffer = (uint8_t *)ver_addr;
2755 		user_len = pcmdmessagefld->cmdmessage.Length;
2756 		(void) memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer,
2757 		    user_len);
2758 		wqbuf_lastidx = acb->wqbuf_lastidx;
2759 		wqbuf_firstidx = acb->wqbuf_firstidx;
2760 		if (wqbuf_lastidx != wqbuf_firstidx) {
2761 			struct scsi_arq_status *arq_status;
2762 
2763 			arcmsr_post_ioctldata2iop(acb);
2764 			arq_status =
2765 			    (struct scsi_arq_status *)(intptr_t)
2766 			    (pkt->pkt_scbp);
2767 			bzero((caddr_t)arq_status,
2768 			    sizeof (struct scsi_arq_status));
2769 			arq_status->sts_rqpkt_reason = CMD_CMPLT;
2770 			arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
2772 			    STATE_XFERRED_DATA | STATE_GOT_STATUS);
2773 
2774 			arq_status->sts_rqpkt_statistics = pkt->pkt_statistics;
2775 			arq_status->sts_rqpkt_resid = 0;
			/* sts_sensedata is embedded; its address is never NULL */
			{
2777 				struct scsi_extended_sense *sts_sensedata;
2778 
2779 				sts_sensedata = &arq_status->sts_sensedata;
2780 
2781 				/* has error report sensedata */
2782 				sts_sensedata->es_code = 0x0;
2783 				sts_sensedata->es_valid = 0x01;
2784 				sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
2785 				/* AdditionalSenseLength */
2786 				sts_sensedata->es_add_len = 0x0A;
2787 				/* AdditionalSenseCode */
2788 				sts_sensedata->es_add_code = 0x20;
2789 			}
2790 			retvalue = ARCMSR_MESSAGE_FAIL;
2791 		} else {
			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1) &
2793 			    (ARCMSR_MAX_QBUFFER - 1);
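			/*
			 * Classic ring-buffer free-space formula, which
			 * requires ARCMSR_MAX_QBUFFER to be a power of
			 * two: assuming a size of 4096, firstidx = 10
			 * and lastidx = 200 give (10 - 200 - 1) & 4095
			 * = 3905 free bytes.
			 */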
2794 			if (my_empty_len >= user_len) {
2795 				while (user_len > 0) {
2796 					pQbuffer =
2797 					    &acb->wqbuffer[acb->wqbuf_lastidx];
2798 					(void) memcpy(pQbuffer,
2799 					    ptmpuserbuffer, 1);
2800 					acb->wqbuf_lastidx++;
2801 					acb->wqbuf_lastidx %=
2802 					    ARCMSR_MAX_QBUFFER;
2803 					ptmpuserbuffer++;
2804 					user_len--;
2805 				}
2806 				if (acb->acb_flags &
2807 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2808 					acb->acb_flags &=
2809 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2810 					arcmsr_post_ioctldata2iop(acb);
2811 				}
2812 			} else {
2813 				struct scsi_arq_status *arq_status;
2814 
2815 				/* has error report sensedata */
2816 				arq_status =
2817 				    (struct scsi_arq_status *)
2818 				    (intptr_t)(pkt->pkt_scbp);
2819 				bzero((caddr_t)arq_status,
2820 				    sizeof (struct scsi_arq_status));
2821 				arq_status->sts_rqpkt_reason = CMD_CMPLT;
2822 				arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
				    STATE_GOT_TARGET | STATE_SENT_CMD |
2824 				    STATE_XFERRED_DATA | STATE_GOT_STATUS);
2825 				arq_status->sts_rqpkt_statistics =
2826 				    pkt->pkt_statistics;
2827 				arq_status->sts_rqpkt_resid = 0;
				/* sts_sensedata is embedded; never NULL */
				{
2829 					struct scsi_extended_sense
2830 					    *sts_sensedata;
2831 
2832 					sts_sensedata =
2833 					    &arq_status->sts_sensedata;
2834 
2835 					/* has error report sensedata */
2836 					sts_sensedata->es_code  = 0x0;
2837 					sts_sensedata->es_valid = 0x01;
2838 					sts_sensedata->es_key =
2839 					    KEY_ILLEGAL_REQUEST;
2840 					/* AdditionalSenseLength */
2841 					sts_sensedata->es_add_len = 0x0A;
2842 					/* AdditionalSenseCode */
2843 					sts_sensedata->es_add_code = 0x20;
2844 				}
2845 				retvalue = ARCMSR_MESSAGE_FAIL;
2846 			}
2847 		}
2848 		kmem_free(ver_addr, MSGDATABUFLEN);
2849 	}
2850 	break;
2851 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
2852 	{
2853 		pQbuffer = acb->rqbuffer;
2854 
2855 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2856 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2857 			arcmsr_iop_message_read(acb);
2858 		}
2859 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2860 		acb->rqbuf_firstidx = 0;
2861 		acb->rqbuf_lastidx = 0;
2862 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2863 		pcmdmessagefld->cmdmessage.ReturnCode =
2864 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2865 	}
2866 	break;
2867 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
2868 	{
2869 		pQbuffer = acb->wqbuffer;
2870 
2871 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2872 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2873 			arcmsr_iop_message_read(acb);
2874 		}
2875 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2876 		    ACB_F_MESSAGE_WQBUFFER_READ);
2877 		acb->wqbuf_firstidx = 0;
2878 		acb->wqbuf_lastidx = 0;
2879 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2880 		pcmdmessagefld->cmdmessage.ReturnCode =
2881 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2882 	}
2883 	break;
2884 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
2885 	{
2886 
2887 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2888 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2889 			arcmsr_iop_message_read(acb);
2890 		}
2891 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2892 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
2893 		    ACB_F_MESSAGE_WQBUFFER_READ);
2894 		acb->rqbuf_firstidx = 0;
2895 		acb->rqbuf_lastidx = 0;
2896 		acb->wqbuf_firstidx = 0;
2897 		acb->wqbuf_lastidx = 0;
2898 		pQbuffer = acb->rqbuffer;
2899 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
2900 		pQbuffer = acb->wqbuffer;
2901 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
2902 		pcmdmessagefld->cmdmessage.ReturnCode =
2903 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2904 	}
2905 	break;
2906 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
2907 		pcmdmessagefld->cmdmessage.ReturnCode =
2908 		    ARCMSR_MESSAGE_RETURNCODE_3F;
2909 		break;
2910 	/*
2911 	 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
2912 	 */
2913 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2914 		arcmsr_iop_parking(acb);
2915 		break;
2916 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2917 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
2918 			arcmsr_flush_hba_cache(acb);
2919 		} else {
2920 			arcmsr_flush_hbb_cache(acb);
2921 		}
2922 		break;
2923 	default:
2924 		retvalue = ARCMSR_MESSAGE_FAIL;
2925 	}
2926 
2927 message_out:
2928 
2929 	return (retvalue);
2930 }
2931 
2932 
2933 
2934 static int
2935 arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
2936     cred_t *credp, int *rvalp) {
2937 #ifndef __lock_lint
2938 	_NOTE(ARGUNUSED(rvalp))
2939 #endif
2940 
2941 	struct ACB *acb;
2942 	struct CMD_MESSAGE_FIELD *pktioctlfld;
2943 	int retvalue = 0;
2944 	int instance = MINOR2INST(getminor(dev));
2945 
2946 	if (instance < 0)
2947 		return (ENXIO);
2948 
2949 	if (secpolicy_sys_config(credp, B_FALSE) != 0)
2950 		return (EPERM);
2951 
2952 	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2953 	if (acb == NULL)
2954 		return (ENXIO);
2955 
	/* KM_SLEEP allocations never fail, so no NULL check is needed */
	pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD),
	    KM_SLEEP);
2960 
2961 	/*
	 * If we got here, we are either a 64-bit app in a 64-bit kernel
	 * or a 32-bit app in a 32-bit kernel.  Either way, we can copy
	 * in the args without any special conversion.
2965 	 */
2966 
2967 	mutex_enter(&acb->ioctl_mutex);
2968 	if (ddi_copyin((void *)arg, pktioctlfld,
2969 	    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
2970 		retvalue = ENXIO;
2971 		goto ioctl_out;
2972 	}
2973 
2974 	if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
2975 		/* validity check */
2976 		retvalue = ENXIO;
2977 		goto ioctl_out;
2978 	}
2979 
2980 	switch ((unsigned int)ioctl_cmd) {
2981 	case ARCMSR_MESSAGE_READ_RQBUFFER:
2982 	{
2983 		unsigned long *ver_addr;
2984 		uint8_t *pQbuffer, *ptmpQbuffer;
2985 		int32_t allxfer_len = 0;
2986 
		/* KM_SLEEP allocations never fail, so no NULL check is needed */
		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2992 
2993 		ptmpQbuffer = (uint8_t *)ver_addr;
2994 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
2995 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
2996 			/* copy READ QBUFFER to srb */
2997 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
2998 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
2999 			acb->rqbuf_firstidx++;
			/* wrap the index to 0 past the last slot */
3001 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
3002 			ptmpQbuffer++;
3003 			allxfer_len++;
3004 		}
3005 
3006 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3007 			struct QBUFFER *prbuffer;
3008 			uint8_t *pQbuffer;
3009 			uint8_t *iop_data;
3010 			int32_t iop_len;
3011 
3012 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3013 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
3014 			iop_data = (uint8_t *)prbuffer->data;
3015 			iop_len = (int32_t)prbuffer->data_len;
			/*
			 * This IOP data cannot overflow the ring buffer
			 * again at this point, so copy it straight in.
			 */
3020 			while (iop_len > 0) {
3021 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
3022 				(void) memcpy(pQbuffer, iop_data, 1);
3023 				acb->rqbuf_lastidx++;
3024 				/* if last index number set it to 0 */
3025 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
3026 				iop_data++;
3027 				iop_len--;
3028 			}
3029 			/* let IOP know data has been read */
3030 			arcmsr_iop_message_read(acb);
3031 		}
3032 		(void) memcpy(pktioctlfld->messagedatabuffer,
3033 		    (uint8_t *)ver_addr, allxfer_len);
3034 		pktioctlfld->cmdmessage.Length = allxfer_len;
3035 		pktioctlfld->cmdmessage.ReturnCode =
3036 		    ARCMSR_MESSAGE_RETURNCODE_OK;
3037 
3038 		if (ddi_copyout(pktioctlfld, (void *)arg,
3039 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3040 			retvalue = ENXIO;
3041 
3042 		kmem_free(ver_addr, MSGDATABUFLEN);
3043 	}
3044 	break;
3045 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
3046 	{
3047 		unsigned long *ver_addr;
3048 		int32_t my_empty_len, user_len;
3049 		int32_t wqbuf_firstidx, wqbuf_lastidx;
3050 		uint8_t *pQbuffer, *ptmpuserbuffer;
3051 
		/* KM_SLEEP allocations never fail, so no NULL check is needed */
		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
3058 
3059 		ptmpuserbuffer = (uint8_t *)ver_addr;
3060 		user_len = pktioctlfld->cmdmessage.Length;
3061 		(void) memcpy(ptmpuserbuffer,
3062 		    pktioctlfld->messagedatabuffer, user_len);
		/*
		 * Check whether the data transfer length of this request
		 * would overflow the wqbuffer array.
		 */
3067 		wqbuf_lastidx = acb->wqbuf_lastidx;
3068 		wqbuf_firstidx = acb->wqbuf_firstidx;
3069 		if (wqbuf_lastidx != wqbuf_firstidx) {
3070 			arcmsr_post_ioctldata2iop(acb);
3071 			pktioctlfld->cmdmessage.ReturnCode =
3072 			    ARCMSR_MESSAGE_RETURNCODE_ERROR;
3073 		} else {
3074 			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
3075 			    & (ARCMSR_MAX_QBUFFER - 1);
3076 			if (my_empty_len >= user_len) {
3077 				while (user_len > 0) {
3078 					/* copy srb data to wqbuffer */
3079 					pQbuffer =
3080 					    &acb->wqbuffer[acb->wqbuf_lastidx];
3081 					(void) memcpy(pQbuffer,
3082 					    ptmpuserbuffer, 1);
3083 					acb->wqbuf_lastidx++;
					/* wrap the index to 0 past the last slot */
3085 					acb->wqbuf_lastidx %=
3086 					    ARCMSR_MAX_QBUFFER;
3087 					ptmpuserbuffer++;
3088 					user_len--;
3089 				}
3090 				/* post first Qbuffer */
3091 				if (acb->acb_flags &
3092 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
3093 					acb->acb_flags &=
3094 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
3095 					arcmsr_post_ioctldata2iop(acb);
3096 				}
3097 				pktioctlfld->cmdmessage.ReturnCode =
3098 				    ARCMSR_MESSAGE_RETURNCODE_OK;
3099 			} else {
3100 				pktioctlfld->cmdmessage.ReturnCode =
3101 				    ARCMSR_MESSAGE_RETURNCODE_ERROR;
3102 			}
3103 		}
3104 		if (ddi_copyout(pktioctlfld, (void *)arg,
3105 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3106 			retvalue = ENXIO;
3107 
3108 		kmem_free(ver_addr, MSGDATABUFLEN);
3109 	}
3110 	break;
3111 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
3112 	{
3113 		uint8_t *pQbuffer = acb->rqbuffer;
3114 
3115 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3116 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
3118 		}
3119 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
3120 		acb->rqbuf_firstidx = 0;
3121 		acb->rqbuf_lastidx = 0;
3122 		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
3123 		/* report success */
3124 		pktioctlfld->cmdmessage.ReturnCode =
3125 		    ARCMSR_MESSAGE_RETURNCODE_OK;
3126 		if (ddi_copyout(pktioctlfld, (void *)arg,
3127 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3128 			retvalue = ENXIO;
3129 
3130 	}
3131 	break;
3132 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
3133 	{
3134 		uint8_t *pQbuffer = acb->wqbuffer;
3135 
3136 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3137 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3138 			arcmsr_iop_message_read(acb);
3139 		}
3140 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3141 		    ACB_F_MESSAGE_WQBUFFER_READ);
3142 		acb->wqbuf_firstidx = 0;
3143 		acb->wqbuf_lastidx = 0;
3144 		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
3145 		/* report success */
3146 		pktioctlfld->cmdmessage.ReturnCode =
3147 		    ARCMSR_MESSAGE_RETURNCODE_OK;
3148 		if (ddi_copyout(pktioctlfld, (void *)arg,
3149 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3150 			retvalue = ENXIO;
3151 
3152 	}
3153 	break;
3154 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
3155 	{
3156 		uint8_t *pQbuffer;
3157 
3158 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3159 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3160 			arcmsr_iop_message_read(acb);
3161 		}
3162 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3163 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
3164 		    ACB_F_MESSAGE_WQBUFFER_READ);
3165 		acb->rqbuf_firstidx = 0;
3166 		acb->rqbuf_lastidx = 0;
3167 		acb->wqbuf_firstidx = 0;
3168 		acb->wqbuf_lastidx = 0;
3169 		pQbuffer = acb->rqbuffer;
3170 		bzero(pQbuffer, sizeof (struct QBUFFER));
3171 		pQbuffer = acb->wqbuffer;
3172 		bzero(pQbuffer, sizeof (struct QBUFFER));
3173 		/* report success */
3174 		pktioctlfld->cmdmessage.ReturnCode =
3175 		    ARCMSR_MESSAGE_RETURNCODE_OK;
3176 		if (ddi_copyout(pktioctlfld, (void *)arg,
3177 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3178 			retvalue = ENXIO;
3179 
3180 	}
3181 	break;
3182 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
3183 	{
3184 		pktioctlfld->cmdmessage.ReturnCode =
3185 		    ARCMSR_MESSAGE_RETURNCODE_3F;
3186 		if (ddi_copyout(pktioctlfld, (void *)arg,
3187 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3188 			retvalue = ENXIO;
3189 	}
3190 	break;
3191 	/* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
3192 	case ARCMSR_MESSAGE_SAY_GOODBYE:
3193 		arcmsr_iop_parking(acb);
3194 		break;
3195 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
3196 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3197 			arcmsr_flush_hba_cache(acb);
3198 		} else {
3199 			arcmsr_flush_hbb_cache(acb);
3200 		}
3201 		break;
3202 	default:
3203 		retvalue = ENOTTY;
3204 	}
3205 
3206 ioctl_out:
3207 	kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
3208 	mutex_exit(&acb->ioctl_mutex);
3209 
3210 	return (retvalue);
3211 }
3212 
3213 
3214 
3215 static struct CCB *
3216 arcmsr_get_freeccb(struct ACB *acb) {
3217 
3218 	struct CCB *ccb;
3219 	int workingccb_startindex, workingccb_doneindex;
3220 
3221 
3222 	mutex_enter(&acb->workingQ_mutex);
3223 	workingccb_doneindex = acb->workingccb_doneindex;
3224 	workingccb_startindex = acb->workingccb_startindex;
3225 	ccb = acb->ccbworkingQ[workingccb_startindex];
3226 	workingccb_startindex++;
3227 	workingccb_startindex %= ARCMSR_MAX_FREECCB_NUM;
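	/*
	 * If the advanced start index has caught up with the done index,
	 * the free-CCB ring is empty; fail the allocation and leave the
	 * saved start index unchanged.
	 */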
3228 	if (workingccb_doneindex != workingccb_startindex) {
3229 		acb->workingccb_startindex = workingccb_startindex;
3230 	} else {
3231 		ccb = NULL;
3232 	}
3233 
3234 	mutex_exit(&acb->workingQ_mutex);
3235 	return (ccb);
3236 }
3237 
3238 
3239 
3240 static int
3241 arcmsr_seek_cmd2abort(struct ACB *acb,
3242     struct scsi_pkt *abortpkt) {
3243 
3244 	struct CCB *ccb;
3245 	uint32_t intmask_org = 0;
3246 	int i = 0;
3247 
3248 	acb->num_aborts++;
3249 
3250 	if (abortpkt == NULL) {
3251 		/*
3252 		 * if abortpkt is NULL, the upper layer needs us
3253 		 * to abort all commands
3254 		 */
3255 		if (acb->ccboutstandingcount != 0) {
3256 			/* disable all outbound interrupt */
3257 			intmask_org = arcmsr_disable_allintr(acb);
3258 			/* clear and abort all outbound posted Q */
3259 			arcmsr_done4abort_postqueue(acb);
			/* tell the IOP 331 to abort the outstanding commands */
3261 			if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3262 				arcmsr_abort_hba_allcmd(acb);
3263 			} else {
3264 				arcmsr_abort_hbb_allcmd(acb);
3265 			}
3266 
3267 			for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3268 				ccb = acb->pccb_pool[i];
3269 				if (ccb->startdone == ARCMSR_CCB_START) {
3270 					/*
3271 					 * this ccb will complete at
3272 					 * hwinterrupt
3273 					 */
3274 					ccb->startdone = ARCMSR_CCB_ABORTED;
3275 					ccb->pkt->pkt_reason = CMD_ABORTED;
3276 					ccb->pkt->pkt_statistics |=
3277 					    STAT_ABORTED;
3278 					arcmsr_ccb_complete(ccb, 1);
3279 				}
3280 			}
3281 			/*
3282 			 * enable outbound Post Queue, outbound
3283 			 * doorbell Interrupt
3284 			 */
3285 			arcmsr_enable_allintr(acb, intmask_org);
3286 		}
3287 		return (DDI_SUCCESS);
3288 	}
3289 
	/*
	 * The upper layer acquires the abort lock just prior to calling
	 * us.  First determine whether we currently own this command by
	 * searching the outstanding CCB pool; if the packet is not found
	 * there, fail the abort request.
	 */
3298 
3299 	if (acb->ccboutstandingcount != 0) {
3300 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3301 			ccb = acb->pccb_pool[i];
3302 			if (ccb->startdone == ARCMSR_CCB_START) {
3303 				if (ccb->pkt == abortpkt) {
3304 					ccb->startdone =
3305 					    ARCMSR_CCB_ABORTED;
3306 					goto abort_outstanding_cmd;
3307 				}
3308 			}
3309 		}
3310 	}
3311 
3312 	return (DDI_FAILURE);
3313 
3314 abort_outstanding_cmd:
3315 	/* disable all outbound interrupts */
3316 	intmask_org = arcmsr_disable_allintr(acb);
3317 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3318 		arcmsr_polling_hba_ccbdone(acb, ccb);
3319 	} else {
3320 		arcmsr_polling_hbb_ccbdone(acb, ccb);
3321 	}
3322 
3323 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3324 	arcmsr_enable_allintr(acb, intmask_org);
3325 	return (DDI_SUCCESS);
3326 }
3327 
3328 
3329 
3330 static void
3331 arcmsr_pcidev_disattach(struct ACB *acb) {
3332 
3333 	struct CCB *ccb;
3334 	int i = 0;
3335 
3336 	/* disable all outbound interrupts */
3337 	(void) arcmsr_disable_allintr(acb);
3338 	/* stop adapter background rebuild */
3339 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3340 		arcmsr_stop_hba_bgrb(acb);
3341 		arcmsr_flush_hba_cache(acb);
3342 	} else {
3343 		arcmsr_stop_hbb_bgrb(acb);
3344 		arcmsr_flush_hbb_cache(acb);
3345 	}
3346 	/* abort all outstanding commands */
3347 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3348 	acb->acb_flags &= ~ACB_F_IOP_INITED;
3349 
3350 	if (acb->ccboutstandingcount != 0) {
3351 		/* clear and abort all outbound posted Q */
3352 		arcmsr_done4abort_postqueue(acb);
		/* tell the IOP 331 to abort the outstanding commands */
3354 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3355 			arcmsr_abort_hba_allcmd(acb);
3356 		} else {
3357 			arcmsr_abort_hbb_allcmd(acb);
3358 		}
3359 
3360 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3361 			ccb = acb->pccb_pool[i];
3362 			if (ccb->startdone == ARCMSR_CCB_START) {
3363 				ccb->startdone = ARCMSR_CCB_ABORTED;
3364 				ccb->pkt->pkt_reason = CMD_ABORTED;
3365 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3366 				arcmsr_ccb_complete(ccb, 1);
3367 			}
3368 		}
3369 	}
3370 }
3371 
3372 /* get firmware miscellaneous data */
3373 static void
3374 arcmsr_get_hba_config(struct ACB *acb) {
3375 
3376 	struct HBA_msgUnit *phbamu;
3377 
3378 	char *acb_firm_model;
3379 	char *acb_firm_version;
3380 	char *acb_device_map;
3381 	char *iop_firm_model;
3382 	char *iop_firm_version;
3383 	char *iop_device_map;
3384 	int count;
3385 
3386 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3387 	acb_firm_model = acb->firm_model;
3388 	acb_firm_version = acb->firm_version;
3389 	acb_device_map = acb->device_map;
3390 	/* firm_model, 15 */
3391 	iop_firm_model = (char *)
3392 	    (&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
3393 	/* firm_version, 17 */
3394 	iop_firm_version =
3395 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
3396 
3397 	/* device_map, 21 */
3398 	iop_device_map =
3399 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
3400 
3401 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3402 	    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3403 
3404 	if (!arcmsr_hba_wait_msgint_ready(acb))
3405 		cmn_err(CE_CONT,
3406 		    "arcmsr%d: timeout while waiting for adapter firmware "
3407 		    "miscellaneous data",
3408 		    ddi_get_instance(acb->dev_info));
3409 
3410 	count = 8;
3411 	while (count) {
3412 		*acb_firm_model =
3413 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
3414 		acb_firm_model++;
3415 		iop_firm_model++;
3416 		count--;
3417 	}
3418 
3419 	count = 16;
3420 	while (count) {
3421 		*acb_firm_version =
3422 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
3423 		acb_firm_version++;
3424 		iop_firm_version++;
3425 		count--;
3426 	}
3427 
3428 	count = 16;
3429 	while (count) {
3430 		*acb_device_map =
3431 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
3432 		acb_device_map++;
3433 		iop_device_map++;
3434 		count--;
3435 	}
3436 
3437 	cmn_err(CE_CONT, "arcmsr%d: ARECA RAID FIRMWARE VERSION %s",
3438 	    ddi_get_instance(acb->dev_info), acb->firm_version);
3439 
3440 	/* firm_request_len, 1 */
3441 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3442 	    &phbamu->msgcode_rwbuffer[1]);
3443 	/* firm_numbers_queue, 2 */
3444 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3445 	    &phbamu->msgcode_rwbuffer[2]);
3446 	/* firm_sdram_size, 3 */
3447 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3448 	    &phbamu->msgcode_rwbuffer[3]);
3449 	/* firm_ide_channels, 4 */
3450 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3451 	    &phbamu->msgcode_rwbuffer[4]);
3452 }
3453 
3454 /* get firmware miscellaneous data */
3455 static void
3456 arcmsr_get_hbb_config(struct ACB *acb) {
3457 
3458 	struct HBB_msgUnit *phbbmu;
3459 	char *acb_firm_model;
3460 	char *acb_firm_version;
3461 	char *acb_device_map;
3462 	char *iop_firm_model;
3463 	char *iop_firm_version;
3464 	char *iop_device_map;
3465 	int count;
3466 
3467 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3468 	acb_firm_model = acb->firm_model;
3469 	acb_firm_version = acb->firm_version;
3470 	acb_device_map = acb->device_map;
3471 	/* firm_model, 15 */
3472 	iop_firm_model = (char *)
3473 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
3474 	/* firm_version, 17 */
3475 	iop_firm_version = (char *)
3476 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
3477 	/* device_map, 21 */
3478 	iop_device_map = (char *)
3479 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
3480 
3481 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3482 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3483 	    ARCMSR_MESSAGE_GET_CONFIG);
3484 
3485 	if (!arcmsr_hbb_wait_msgint_ready(acb))
3486 		cmn_err(CE_CONT,
3487 		    "arcmsr%d: timeout while waiting for adapter firmware "
3488 		    "miscellaneous data",
3489 		    ddi_get_instance(acb->dev_info));
3490 
3491 	count = 8;
3492 	while (count) {
3493 		*acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle1,
3494 		    iop_firm_model);
3495 		acb_firm_model++;
3496 		iop_firm_model++;
3497 		count--;
3498 	}
3499 
3500 	count = 16;
3501 	while (count) {
3502 		*acb_firm_version = CHIP_REG_READ8(acb->reg_mu_acc_handle1,
3503 		    iop_firm_version);
3504 		acb_firm_version++;
3505 		iop_firm_version++;
3506 		count--;
3507 	}
3508 	count = 16;
3509 	while (count) {
3510 		*acb_device_map =
3511 		    CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_device_map);
3512 		acb_device_map++;
3513 		iop_device_map++;
3514 		count--;
3515 	}
3516 
3517 	cmn_err(CE_CONT, "arcmsr%d: ARECA RAID FIRMWARE VERSION %s",
3518 	    ddi_get_instance(acb->dev_info), acb->firm_version);
3519 
3520 	/* firm_request_len, 1 */
3521 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3522 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
3523 	/* firm_numbers_queue, 2 */
3524 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3525 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
3526 	/* firm_sdram_size, 3 */
3527 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3528 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
3529 	/* firm_ide_channels, 4 */
3530 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3531 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
3532 }
3533 
3534 
3535 
3536 /* start background rebuild */
3537 static void
3538 arcmsr_start_hba_bgrb(struct ACB *acb) {
3539 
3540 	struct HBA_msgUnit *phbamu;
3541 
3542 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3543 
3544 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3545 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3546 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3547 
3548 	if (!arcmsr_hba_wait_msgint_ready(acb))
3549 		cmn_err(CE_WARN,
3550 		    "arcmsr%d: timeout while waiting for background "
3551 		    "rebuild to start",
3552 		    ddi_get_instance(acb->dev_info));
3553 }
3554 
3555 
3556 static void
3557 arcmsr_start_hbb_bgrb(struct ACB *acb) {
3558 
3559 	struct HBB_msgUnit *phbbmu;
3560 
3561 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3562 
3563 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3564 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3565 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3566 	    ARCMSR_MESSAGE_START_BGRB);
3567 
3568 	if (!arcmsr_hbb_wait_msgint_ready(acb))
3569 		cmn_err(CE_WARN,
3570 		    "arcmsr%d: timeout while waiting for background "
3571 		    "rebuild to start",
3572 		    ddi_get_instance(acb->dev_info));
3573 }
3574 
3575 
3576 static void
3577 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb) {
3578 
3579 	struct HBA_msgUnit *phbamu;
3580 	struct CCB *ccb;
3581 	uint32_t flag_ccb, outbound_intstatus;
3582 	uint32_t poll_ccb_done = 0;
3583 	uint32_t poll_count = 0;
3584 
3585 
3586 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3587 
3588 polling_ccb_retry:
3589 	poll_count++;
3590 	outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3591 	    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
3592 
3593 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
3594 	    outbound_intstatus); /* clear interrupt */
3595 
3596 	/* Use correct offset and size for syncing */
3597 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
3598 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
3599 		return;
3600 
3601 	/*LINTED*/
3602 	while (1) {
3603 		if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3604 		    &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
3605 			if (poll_ccb_done) {
3606 				/* no more completed CCBs in the chip FIFO */
3607 				break;
3608 			} else {
3609 				drv_usecwait(25000);
3610 				if ((poll_count > 100) && (poll_ccb != NULL)) {
3611 					break;
3612 				}
3613 				if (acb->ccboutstandingcount == 0) {
3614 					break;
3615 				}
3616 				goto polling_ccb_retry;
3617 			}
3618 		}
3619 
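		/*
		 * flag_ccb is the completed CCB's physical address shifted
		 * right by 5: every CCB frame is 32-byte aligned, so
		 * shifting left by 5 restores the physical address, and
		 * adding vir2phy_offset maps it back to the kernel
		 * virtual CCB pointer.
		 */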
3620 		/* check if command done with no error */
3621 		ccb = (struct CCB *)(acb->vir2phy_offset  +
3622 		    (flag_ccb << 5)); /* frame must be 32 bytes aligned */
3623 		if (poll_ccb != NULL)
3624 			poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
3625 
3626 		if ((ccb->acb != acb) ||
3627 		    (ccb->startdone != ARCMSR_CCB_START)) {
3628 			if (ccb->startdone == ARCMSR_CCB_ABORTED) {
3629 				ccb->pkt->pkt_reason = CMD_ABORTED;
3630 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3631 				arcmsr_ccb_complete(ccb, 1);
3632 				continue;
3633 			}
3634 			cmn_err(CE_WARN, "arcmsr%d: polling op got "
3635 			    "unexpected ccb command done",
3636 			    ddi_get_instance(acb->dev_info));
3637 			continue;
3638 		}
3639 		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
3640 	}	/* drain reply FIFO */
3641 }
3642 
3643 
3644 static void
3645 arcmsr_polling_hbb_ccbdone(struct ACB *acb,
3646     struct CCB *poll_ccb) {
3647 
3648 	struct HBB_msgUnit *phbbmu;
3649 	struct CCB *ccb;
3650 	uint32_t flag_ccb;
3651 	uint32_t poll_ccb_done = 0;
3652 	uint32_t poll_count = 0;
3653 	int index;
3654 
3655 
3656 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3657 
3658 
3659 polling_ccb_retry:
3660 	poll_count++;
3661 	/* clear doorbell interrupt */
3662 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3663 	    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3664 	    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
3665 
3666 	/* Use correct offset and size for syncing */
3667 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
3668 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
3669 		return;
3670 
3671 
3672 	/*LINTED*/
3673 	while (1) {
3674 		index = phbbmu->doneq_index;
3675 		if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
3676 			if (poll_ccb_done) {
3677 				/* no more completed CCBs in the chip FIFO */
3678 				break;
3679 			} else {
3680 				drv_usecwait(25000);
3681 				if ((poll_count > 100) && (poll_ccb != NULL))
3682 					break;
3683 				if (acb->ccboutstandingcount == 0)
3684 					break;
3685 				goto polling_ccb_retry;
3686 			}
3687 		}
3688 
3689 		phbbmu->done_qbuffer[index] = 0;
3690 		index++;
3691 		/* wrap the index to 0 past the last slot */
3692 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
3693 		phbbmu->doneq_index = index;
3694 		/* check if command done with no error */
3695 		/* frame must be 32 bytes aligned */
3696 		ccb = (struct CCB *)(acb->vir2phy_offset +
3697 		    (flag_ccb << 5));
3698 		if (poll_ccb != NULL)
3699 			poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
3700 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3701 			if (ccb->startdone == ARCMSR_CCB_ABORTED) {
3702 				ccb->pkt->pkt_reason = CMD_ABORTED;
3703 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3704 				arcmsr_ccb_complete(ccb, 1);
3705 				continue;
3706 			}
3707 			cmn_err(CE_WARN, "arcmsr%d: polling op got "
3708 			    "unexpected ccb command done",
3709 			    ddi_get_instance(acb->dev_info));
3710 			continue;
3711 		}
3712 		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
3713 	}	/* drain reply FIFO */
3714 }
3715 
3716 
3717 /*
3718  *    Function: arcmsr_tran_start(9E)
3719  * Description: Transport the command in pktp to the target device.
3720  *		The command is not finished when this returns, only
3721  *		sent to the target; arcmsr_interrupt will call
3722  *		(*pktp->pkt_comp)(pktp) when the target device is done.
3723  *
3724  *       Input: struct scsi_address *ap, struct scsi_pkt *pktp
3725  *      Output:	TRAN_ACCEPT if pkt is OK and the driver is not busy
3726  *		TRAN_BUSY if the driver is busy
3727  *		TRAN_BADPKT if pkt is invalid
3728  */
3729 static int
3730 arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) {
3731 
3732 	struct ACB *acb;
3733 	struct CCB *ccb;
3734 	int target = ap->a_target;
3735 	int lun = ap->a_lun;
3736 
3737 
3738 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
3739 	ccb = pkt->pkt_ha_private;
3740 
3741 	if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
3742 	    (ccb->ccb_flags & CCB_FLAG_DMACONSISTENT))
3743 		(void) ddi_dma_sync(ccb->pkt_dma_handle, ccb->pkt_dma_offset,
3744 		    ccb->pkt_dma_len, DDI_DMA_SYNC_FORDEV);
3745 
3746 
3747 	if (ccb->startdone == ARCMSR_CCB_UNBUILD)
3748 		arcmsr_build_ccb(ccb);
3749 
3750 
3751 	if (acb->acb_flags & ACB_F_BUS_RESET) {
3752 		cmn_err(CE_CONT,
3753 		    "arcmsr%d: bus reset in progress, command completed "
		    "with CMD_RESET",
3754 		    ddi_get_instance(acb->dev_info));
3755 		pkt->pkt_reason = CMD_RESET;
3756 		pkt->pkt_statistics |= STAT_BUS_RESET;
3757 		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
3758 		    STATE_SENT_CMD | STATE_GOT_STATUS);
3759 		if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
3760 		    (pkt->pkt_state & STATE_XFERRED_DATA))
3761 			(void) ddi_dma_sync(ccb->pkt_dma_handle,
3762 			    ccb->pkt_dma_offset, ccb->pkt_dma_len,
3763 			    DDI_DMA_SYNC_FORCPU);
3764 
3765 		if (pkt->pkt_comp)
3766 			(*pkt->pkt_comp)(pkt);
3767 
3768 
3769 		return (TRAN_ACCEPT);
3770 	}
3771 
3772 	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
3773 		uint8_t block_cmd;
3774 
3775 		block_cmd = pkt->pkt_cdbp[0] & 0x0f;
3776 
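		/*
		 * Masking the opcode with 0x0f folds the different CDB
		 * sizes together: READ(6)/(10)/(12) all become 0x08 and
		 * WRITE(6)/(10)/(12) all become 0x0a.
		 */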
3777 		if (block_cmd == 0x08 || block_cmd == 0x0a) {
3778 			cmn_err(CE_CONT,
3779 			    "arcmsr%d: block read/write command while raid "
3780 			    "volume missing (cmd %02x for target %d lun %d)",
3781 			    ddi_get_instance(acb->dev_info),
3782 			    block_cmd, target, lun);
3783 			pkt->pkt_reason = CMD_TIMEOUT;
3784 			pkt->pkt_statistics |= STAT_TIMEOUT;
3785 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
3786 			    STATE_SENT_CMD | STATE_GOT_STATUS);
3787 
3788 			if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
3789 			    (pkt->pkt_state & STATE_XFERRED_DATA))
3790 				(void) ddi_dma_sync(ccb->pkt_dma_handle,
3791 				    ccb->pkt_dma_offset, ccb->pkt_dma_len,
3792 				    DDI_DMA_SYNC_FORCPU);
3793 
3794 
3795 			if (pkt->pkt_comp)
3796 				(*pkt->pkt_comp)(pkt);
3797 
3798 
3799 			return (TRAN_ACCEPT);
3800 		}
3801 	}
3802 
3803 
3804 	/* IMPORTANT: Target 16 is a virtual device for iop message transfer */
3805 	if (target == 16) {
3806 
3807 		struct buf *bp = ccb->bp;
3808 		uint8_t scsicmd = pkt->pkt_cdbp[0];
3809 
3810 		switch (scsicmd) {
3811 		case SCMD_INQUIRY: {
3812 			if (lun != 0) {
3813 				ccb->pkt->pkt_reason = CMD_TIMEOUT;
3814 				ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
3815 				arcmsr_ccb_complete(ccb, 0);
3816 				return (TRAN_ACCEPT);
3817 			}
3818 
3819 			if (bp && bp->b_un.b_addr && bp->b_bcount) {
3820 				uint8_t inqdata[36];

				/* zero-fill so unset inquiry bytes are defined */
				bzero(inqdata, sizeof (inqdata));

3822 				/* EVPD and page code are not supported */
3823 				if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
3824 					inqdata[1] = 0xFF;
3825 					inqdata[2] = 0x00;
3826 				} else {
3827 					/* Periph Qualifier & Periph Dev Type */
3828 					inqdata[0] = DTYPE_PROCESSOR;
3829 					/* rem media bit & Dev Type Modifier */
3830 					inqdata[1] = 0;
3831 					/* ISO, ECMA, & ANSI versions */
3832 					inqdata[2] = 0;
3833 					/* length of additional data */
3834 					inqdata[4] = 31;
3835 					/* Vendor Identification */
3836 					bcopy("Areca   ",
3837 					    &inqdata[8], VIDLEN);
3838 					/* Product Identification */
3839 					bcopy("RAID controller ",
3840 					    &inqdata[16], PIDLEN);
3841 					/* Product Revision */
3842 					bcopy("R001",
3843 					    &inqdata[32], REVLEN);
3844 					if (bp->b_flags & (B_PHYS | B_PAGEIO))
3845 						bp_mapin(bp);
3846 
3847 					(void) memcpy(bp->b_un.b_addr,
3848 					    inqdata, sizeof (inqdata));
3849 				}
3850 				ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
3851 			}
3852 			arcmsr_ccb_complete(ccb, 0);
3853 			return (TRAN_ACCEPT);
3854 		}
3855 		case SCMD_WRITE_BUFFER:
3856 		case SCMD_READ_BUFFER: {
3857 			if (arcmsr_iop_message_xfer(acb, pkt)) {
3858 				/* flag an error so the command is retried */
3859 				ccb->pkt->pkt_reason = CMD_TRAN_ERR;
3860 				ccb->pkt->pkt_statistics |= STAT_TERMINATED;
3861 			}
3862 			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
3863 			arcmsr_ccb_complete(ccb, 0);
3864 			return (TRAN_ACCEPT);
3865 		}
3866 		default:
3867 			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
3868 			arcmsr_ccb_complete(ccb, 0);
3869 			return (TRAN_ACCEPT);
3870 		}
3871 	}
3872 
3873 	if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
3874 		cmn_err(CE_CONT,
3875 		    "arcmsr%d: too many outstanding commands (%d >= %d)",
3876 		    ddi_get_instance(acb->dev_info),
3877 		    acb->ccboutstandingcount,
3878 		    ARCMSR_MAX_OUTSTANDING_CMD);
3879 		return (TRAN_BUSY);
3880 	} else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
3881 		cmn_err(CE_CONT,
3882 		    "arcmsr%d: post failure, ccboutstandingcount = %d",
3883 		    ddi_get_instance(acb->dev_info),
3884 		    acb->ccboutstandingcount);
3885 		return (TRAN_BUSY);
3886 	}
3887 
3888 	return (TRAN_ACCEPT);
3889 }
3890 
3891 /*
3892  * Function: arcmsr_tran_abort(9E)
3893  * 		SCSA interface routine to abort pkt(s) in progress.
3894  * 		Aborts the pkt specified.  If NULL pkt, aborts ALL pkts.
3895  * Output:	Return 1 on success
3896  *		Return 0 on failure
3897  */
3898 static int
3899 arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt) {
3900 
3901 	struct ACB *acb;
3902 	int return_code;
3903 
3904 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
3905 
3906 
3907 	cmn_err(CE_WARN,
3908 	    "arcmsr%d: tran_abort called for target %d lun %d",
3909 	    ddi_get_instance(acb->dev_info), ap->a_target, ap->a_lun);
3910 
3911 	while (acb->ccboutstandingcount != 0) {
3912 		drv_usecwait(10000);
3913 	}
3914 
3915 	mutex_enter(&acb->acb_mutex);
3916 	return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
3917 	mutex_exit(&acb->acb_mutex);
3918 
3919 	if (return_code != DDI_SUCCESS) {
3920 		cmn_err(CE_WARN,
3921 		    "arcmsr%d: abort command failed for target %d lun %d",
3922 		    ddi_get_instance(acb->dev_info),
3923 		    ap->a_target, ap->a_lun);
3924 		return (0);
3925 	}
3926 
3927 	return (1);
3928 }
3929 
3930 
3931 /*
3932  * Function: arcmsr_tran_reset(9E)
3933  *           SCSA interface routine to perform scsi resets on either
3934  *           a specified target or the bus (default).
3935  *   Output: Return 1 on success
3936  *	     Return 0 on failure
3937  */
3938 static int
3939 arcmsr_tran_reset(struct scsi_address *ap, int level) {
3940 
3941 	struct ACB *acb;
3942 	int return_code = 1;
3943 	int retry = 0;
3944 
3945 
3946 	/* Are we in the middle of dumping core? */
3947 	if (ddi_in_panic())
3948 		return (return_code);
3949 
3950 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
3951 
3952 	cmn_err(CE_WARN, "arcmsr%d: tran reset (level 0x%x) called "
3953 	    "for target %d lun %d",
3954 	    ddi_get_instance(acb->dev_info), level,
3955 	    ap->a_target, ap->a_lun);
3956 	mutex_enter(&acb->acb_mutex);
3957 
3958 	while ((acb->ccboutstandingcount > 0) && (retry < 400)) {
3959 		(void) arcmsr_interrupt((caddr_t)acb);
3960 		drv_usecwait(25000);
3961 		retry++;
3962 	}
3963 
3964 	switch (level) {
3965 	case RESET_ALL:		/* level 1 */
3966 		acb->num_resets++;
3967 		acb->acb_flags |= ACB_F_BUS_RESET;
3968 		if (acb->timeout_count)
3969 			arcmsr_iop_reset(acb);
3970 		acb->acb_flags &= ~ACB_F_BUS_RESET;
3971 		return_code = 0;
3972 		break;
3973 	case RESET_TARGET:	/* level 0 */
3974 		cmn_err(CE_WARN, "arcmsr%d: target reset not supported",
3975 		    ddi_get_instance(acb->dev_info));
3976 		return_code = 0;
3977 		break;
3978 	default:
3979 		return_code = 0;
3980 	}
3981 
3982 	mutex_exit(&acb->acb_mutex);
3983 	return (return_code);
3984 }
3985 
3986 
3987 static void
3988 arcmsr_log(struct ACB *acb, int level, char *fmt, ...) {
3989 
3990 	char	buf[256];
3991 	va_list ap;
3992 
3993 	va_start(ap, fmt);
3994 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
3995 	va_end(ap);
3996 	scsi_log(acb ? acb->dev_info : NULL, "arcmsr", level, "%s", buf);
3997 }
3998 
3999 
4000 static void
4001 arcmsr_iop2drv_data_wrote_handle(struct ACB *acb) {
4002 
4003 	struct QBUFFER *prbuffer;
4004 	uint8_t *pQbuffer;
4005 	uint8_t *iop_data;
4006 	int my_empty_len, iop_len;
4007 	int rqbuf_firstidx, rqbuf_lastidx;
4008 
4009 	/* check whether this IOP data would overflow our rqbuffer */
4010 	rqbuf_lastidx = acb->rqbuf_lastidx;
4011 	rqbuf_firstidx = acb->rqbuf_firstidx;
4012 	prbuffer = arcmsr_get_iop_rqbuffer(acb);
4013 	iop_data = (uint8_t *)prbuffer->data;
4014 	iop_len = prbuffer->data_len;
4015 	my_empty_len = (rqbuf_firstidx-rqbuf_lastidx - 1) &
4016 	    (ARCMSR_MAX_QBUFFER - 1);
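	/*
	 * Worked example (assuming ARCMSR_MAX_QBUFFER is a power of two,
	 * say 4096): with firstidx = 10 and lastidx = 4000 the free space
	 * is (10 - 4000 - 1) & 4095 = 105 bytes, i.e. the modular distance
	 * from lastidx up to one byte short of firstidx.
	 */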
4017 
4018 	if (my_empty_len >= iop_len) {
4019 		while (iop_len > 0) {
4020 			pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
4021 			(void) memcpy(pQbuffer, iop_data, 1);
4022 			rqbuf_lastidx++;
4023 			/* wrap the index to 0 past the last slot */
4024 			rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
4025 			iop_data++;
4026 			iop_len--;
4027 		}
4028 		acb->rqbuf_lastidx = rqbuf_lastidx;
4029 		arcmsr_iop_message_read(acb);
4030 		/* let the IOP know the data has been read */
4031 	} else {
4032 		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
4033 	}
4034 }
4035 
4036 
4037 
4038 static void
4039 arcmsr_iop2drv_data_read_handle(struct ACB *acb) {
4040 
4041 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
4042 	/*
4043 	 * check whether the user-space program has queued any messages for
4044 	 * us; if so, now is the time to send them to the Areca firmware
4045 	 */
4046 
4047 	if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {
4048 
4049 		uint8_t *pQbuffer;
4050 		struct QBUFFER *pwbuffer;
4051 		uint8_t *iop_data;
4052 		int allxfer_len = 0;
4053 
4054 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
4055 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
4056 		iop_data = (uint8_t *)pwbuffer->data;
4057 
4058 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
4059 		    (allxfer_len < 124)) {
4060 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
4061 			(void) memcpy(iop_data, pQbuffer, 1);
4062 			acb->wqbuf_firstidx++;
4063 			/* wrap the index to 0 past the last slot */
4064 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4065 			iop_data++;
4066 			allxfer_len++;
4067 		}
4068 		pwbuffer->data_len = allxfer_len;
4069 		/*
4070 		 * ring the inbound doorbell to tell the IOP the data write is
4071 		 * done; await the reply interrupt before the next Qbuffer post
4072 		 */
4073 		arcmsr_iop_message_wrote(acb);
4074 	}
4075 
4076 	if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
4077 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
4078 }
4079 
4080 
4081 static void
4082 arcmsr_hba_doorbell_isr(struct ACB *acb) {
4083 
4084 	uint32_t outbound_doorbell;
4085 	struct HBA_msgUnit *phbamu;
4086 
4087 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4088 
4089 	/*
4090 	 *  XXX: we may need to check here whether wrqbuffer_lock is held.
4091 	 *  DOORBELL: ding! dong!
4092 	 *  Check whether the firmware has any messages for us to collect.
4093 	 */
4094 
4095 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4096 	    &phbamu->outbound_doorbell);
4097 	/* clear doorbell interrupt */
4098 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4099 	    &phbamu->outbound_doorbell, outbound_doorbell);
4100 
4101 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
4102 		arcmsr_iop2drv_data_wrote_handle(acb);
4103 
4104 
4105 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
4106 		arcmsr_iop2drv_data_read_handle(acb);
4107 }
4108 
4109 
4110 
4111 static void
4112 arcmsr_hba_postqueue_isr(struct ACB *acb) {
4113 
4114 	uint32_t flag_ccb;
4115 	struct HBA_msgUnit *phbamu;
4116 
4117 
4118 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4119 
4120 	/* areca cdb command done */
4121 	/* Use correct offset and size for syncing */
4122 	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
4123 	    DDI_DMA_SYNC_FORKERNEL);
4124 
4125 	while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4126 	    &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
4127 		/* check if command done with no error */
4128 		arcmsr_drain_donequeue(acb, flag_ccb);
4129 	}	/* drain reply FIFO */
4130 }
4131 
4132 static void arcmsr_dr_handle(struct ACB *acb)
4133 {
4134 	char *acb_dev_map = (char *)acb->device_map;
4135 	char *devicemap;
4136 	int target, lun;
4137 	char diff;
4138 	int circ1;
4139 	dev_info_t *dip;
4140 	ddi_acc_handle_t reg;
4141 	switch (acb->adapter_type) {
4142 	case ACB_ADAPTER_TYPE_A:
4143 		{
4144 			struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)
4145 			    acb->pmu;
4146 			devicemap = (char *)&phbamu->msgcode_rwbuffer[21];
4147 			reg = acb->reg_mu_acc_handle0;
4148 		}
4149 		break;
4150 	case ACB_ADAPTER_TYPE_B:
4151 		{
4152 			struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)
4153 			    acb->pmu;
4154 			devicemap = (char *)
4155 			    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[21];
4156 			reg = acb->reg_mu_acc_handle1;
4157 		}
4158 		break;
	default:
		/* unknown adapter type; nothing to rescan */
		return;
4159 	}
4160 
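	/*
	 * Each byte of the device map is a per-target bitmask of present
	 * LUNs.  XORing our cached copy with the firmware's copy yields
	 * the LUNs whose state changed; the bit-walk below onlines newly
	 * arrived LUNs and offlines departed ones.
	 */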
4161 	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
4162 		diff = (*acb_dev_map) ^ CHIP_REG_READ8(reg, devicemap);
4164 		if (diff != 0) {
4165 			char temp;
4166 			*acb_dev_map = CHIP_REG_READ8(reg, devicemap);
4168 			temp = *acb_dev_map;
4169 			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
4170 				if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
4171 					ndi_devi_enter(acb->dev_info, &circ1);
4172 					(void) arcmsr_config_lun(acb, target,
4173 					    lun, NULL);
4174 					ndi_devi_exit(acb->dev_info, circ1);
4175 				} else if ((temp & 0x01) == 0 && (diff & 0x01)
4176 				    == 1) {
4177 					dip = arcmsr_find_child(acb, target,
4178 					    lun);
4179 					if (dip != NULL) {
4180 						(void) ndi_devi_offline(dip,
4181 						    NDI_DEVI_REMOVE);
4182 						cmn_err(CE_NOTE, "arcmsr%d: "
4183 						    "T%dL%d offlined",
4184 						    ddi_get_instance
4185 						    (acb->dev_info), target,
4186 						    lun);
4187 					}
4188 				}
4189 				temp >>= 1;
4190 				diff >>= 1;
4191 			}
4192 		}
4193 		devicemap++;
4194 		acb_dev_map++;
4195 	}
4196 }
4197 
4198 static void arcmsr_hba_message_isr(struct ACB *acb)
4199 {
4200 	struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
4201 	uint32_t *signature = &phbamu->msgcode_rwbuffer[0];
4202 	uint32_t outbound_message;
4203 
4204 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
4205 	    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
4206 
4207 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4208 	    signature);
4209 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
4210 		if ((ddi_taskq_dispatch(acb->taskq, (void (*)(void *))
4211 		    arcmsr_dr_handle, acb, DDI_NOSLEEP)) != DDI_SUCCESS)
4212 			cmn_err(CE_WARN, "DR task start failed");
4213 }
4214 
4215 static void arcmsr_hbb_message_isr(struct ACB *acb)
4216 {
4217 	struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
4218 	uint32_t *signature = &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0];
4219 	uint32_t outbound_message;
4220 
4221 	/* clear interrupts */
4222 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4223 	    &phbbmu->hbb_doorbell->iop2drv_doorbell,
4224 	    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
4225 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4226 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4227 	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
4228 
4229 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4230 	    signature);
4231 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
4232 		if ((ddi_taskq_dispatch(acb->taskq,
4233 		    (void (*)(void *))arcmsr_dr_handle, acb,
4234 		    DDI_NOSLEEP)) != DDI_SUCCESS) {
4235 			cmn_err(CE_WARN, "DR task start failed");
4236 		}
4237 }
4238 
4239 static void
4240 arcmsr_hbb_postqueue_isr(struct ACB *acb) {
4241 
4242 	int index;
4243 	uint32_t flag_ccb;
4244 	struct HBB_msgUnit *phbbmu;
4245 
4246 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4247 
4248 
4249 	/* areca cdb command done */
4250 	index = phbbmu->doneq_index;
4251 
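	/*
	 * The IOP posts one non-zero flag_ccb token per completed command
	 * into done_qbuffer; consume entries starting at doneq_index,
	 * zeroing each slot so the IOP can reuse it and wrapping the
	 * index at ARCMSR_MAX_HBB_POSTQUEUE.
	 */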
4252 	while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
4253 		phbbmu->done_qbuffer[index] = 0;
4254 		index++;
4255 		/* wrap the index to 0 past the last slot */
4256 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
4257 		phbbmu->doneq_index = index;
4258 		/* check if command done with no error */
4259 		arcmsr_drain_donequeue(acb, flag_ccb);
4260 	}	/* drain reply FIFO */
4261 }
4262 
4263 
4264 
4265 
4266 
4267 static uint_t
4268 arcmsr_handle_hba_isr(struct ACB *acb) {
4269 
4270 	uint32_t outbound_intstatus;
4271 	struct HBA_msgUnit *phbamu;
4272 
4273 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4274 
4275 	outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4276 	    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
4277 
4278 	if (!outbound_intstatus)
4279 		/* it must be a shared irq */
4280 		return (DDI_INTR_UNCLAIMED);
4281 
4282 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
4283 	    outbound_intstatus); /* clear interrupt */
4284 
4285 
4286 	/* MU doorbell interrupts */
4287 
4288 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
4289 		arcmsr_hba_doorbell_isr(acb);
4290 
4291 	/* MU post queue interrupts */
4292 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
4293 		arcmsr_hba_postqueue_isr(acb);
4294 
4295 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
4296 		arcmsr_hba_message_isr(acb);
4297 	}
4298 
4299 	return (DDI_INTR_CLAIMED);
4300 }
4301 
4302 
4303 static uint_t
4304 arcmsr_handle_hbb_isr(struct ACB *acb) {
4305 
4306 	uint32_t outbound_doorbell;
4307 	struct HBB_msgUnit *phbbmu;
4308 
4309 
4310 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4311 
4312 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4313 	    &phbbmu->hbb_doorbell->iop2drv_doorbell) & acb->outbound_int_enable;
4314 
4315 	if (!outbound_doorbell)
4316 		/* it must be a shared irq */
4317 		return (DDI_INTR_UNCLAIMED);
4318 
4319 	/* clear doorbell interrupt */
4320 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4321 	    &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
4322 	/* wait a cycle */
4323 	(void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4324 	    &phbbmu->hbb_doorbell->iop2drv_doorbell);
4325 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4326 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4327 	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
4328 
4329 	/* MU ioctl transfer doorbell interrupts */
4330 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
4331 		arcmsr_iop2drv_data_wrote_handle(acb);
4332 
4333 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
4334 		arcmsr_iop2drv_data_read_handle(acb);
4335 
4336 	/* MU post queue interrupts */
4337 	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
4338 		arcmsr_hbb_postqueue_isr(acb);
4339 
4340 	/* MU message interrupt */
4341 
4342 	if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
4343 		arcmsr_hbb_message_isr(acb);
4344 	}
4345 
4346 	return (DDI_INTR_CLAIMED);
4347 }
4348 
4349 
4350 static uint_t
4351 arcmsr_interrupt(caddr_t arg) {
4352 
4353 
4354 	struct ACB *acb = (struct ACB *)(intptr_t)arg;
4355 
4356 	switch (acb->adapter_type) {
4357 	case ACB_ADAPTER_TYPE_A:
4358 		return (arcmsr_handle_hba_isr(acb));
4359 	case ACB_ADAPTER_TYPE_B:
4360 		return (arcmsr_handle_hbb_isr(acb));
4361 	default:
4362 		cmn_err(CE_WARN, "arcmsr%d: unknown adapter type (%d)",
4363 		    ddi_get_instance(acb->dev_info), acb->adapter_type);
4364 		return (DDI_INTR_UNCLAIMED);
4365 	}
4366 }
4367 
4368 
4369 static void
4370 arcmsr_wait_firmware_ready(struct ACB *acb) {
4371 
4372 	uint32_t firmware_state;
4373 
4374 	firmware_state = 0;
4375 
4376 	switch (acb->adapter_type) {
4377 	case ACB_ADAPTER_TYPE_A:
4378 	{
4379 		struct HBA_msgUnit *phbamu;
4380 
4381 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4382 		do {
4383 			firmware_state =
4384 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4385 			    &phbamu->outbound_msgaddr1);
4386 		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
4387 		    == 0);
4388 	}
4389 	break;
4390 	case ACB_ADAPTER_TYPE_B:
4391 	{
4392 		struct HBB_msgUnit *phbbmu;
4393 
4394 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4395 		do {
4396 			firmware_state =
4397 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4398 			    &phbbmu->hbb_doorbell->iop2drv_doorbell);
4399 		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
4400 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4401 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4402 		    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
4403 	}
4404 	break;
4405 	}
4406 }
4407 
4408 static void
4409 arcmsr_clear_doorbell_queue_buffer(struct ACB *acb) {
4410 
4411 	switch (acb->adapter_type) {
4412 	case ACB_ADAPTER_TYPE_A:
4413 	{
4414 		struct HBA_msgUnit *phbamu;
4415 		uint32_t outbound_doorbell;
4416 
4417 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4418 		/* empty the doorbell Qbuffer if the doorbell rang */
4419 		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4420 		    &phbamu->outbound_doorbell);
4421 		/* clear doorbell interrupt */
4422 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4423 		    &phbamu->outbound_doorbell, outbound_doorbell);
4424 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4425 		    &phbamu->inbound_doorbell,
4426 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
4427 	}
4428 	break;
4429 	case ACB_ADAPTER_TYPE_B:
4430 	{
4431 		struct HBB_msgUnit *phbbmu;
4432 
4433 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4434 
4435 		/* clear interrupt and message state */
4436 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4437 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
4438 		    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
4439 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4440 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4441 		    ARCMSR_DRV2IOP_DATA_READ_OK);
4442 		/* let IOP know data has been read */
4443 	}
4444 	break;
4445 	}
4446 }
4447 
4448 
4449 static uint32_t
4450 arcmsr_iop_confirm(struct ACB *acb) {
4451 
4452 	unsigned long ccb_phyaddr;
4453 	uint32_t ccb_phyaddr_hi32;
4454 
4455 	/*
4456 	 * here we need to tell the IOP 331 our freeccb.HighPart, i.e. the
4457 	 * upper 32 bits of the CCB pool's physical address, if non-zero
4458 	 */
4459 	ccb_phyaddr = (unsigned long)acb->ccb_cookie.dmac_address;
4460 	ccb_phyaddr_hi32 = (uint32_t)((ccb_phyaddr >> 16) >> 16);
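	/*
	 * The double 16-bit shift extracts the high 32 bits without the
	 * undefined behavior a single ">> 32" would have where
	 * "unsigned long" is only 32 bits wide; in that case the high
	 * half is simply zero.
	 */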
4461 
4462 	switch (acb->adapter_type) {
4463 	case ACB_ADAPTER_TYPE_A:
4464 	{
4465 		if (ccb_phyaddr_hi32 != 0) {
4466 			struct HBA_msgUnit *phbamu;
4467 
4468 			phbamu = (struct HBA_msgUnit *)acb->pmu;
4469 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4470 			    &phbamu->msgcode_rwbuffer[0],
4471 			    ARCMSR_SIGNATURE_SET_CONFIG);
4472 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4473 			    &phbamu->msgcode_rwbuffer[1], ccb_phyaddr_hi32);
4474 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4475 			    &phbamu->inbound_msgaddr0,
4476 			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
4477 			if (!arcmsr_hba_wait_msgint_ready(acb)) {
4478 				cmn_err(CE_WARN,
4479 				    "arcmsr%d: timeout setting ccb high "
4480 				    "physical address",
4481 				    ddi_get_instance(acb->dev_info));
4482 				return (FALSE);
4483 			}
4484 		}
4485 	}
4486 	break;
4487 
4488 	/* if adapter is type B, set window of "post command queue" */
4489 
4490 	case ACB_ADAPTER_TYPE_B:
4491 	{
4492 		uint32_t post_queue_phyaddr;
4493 		struct HBB_msgUnit *phbbmu;
4494 
4495 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4496 		phbbmu->postq_index = 0;
4497 		phbbmu->doneq_index = 0;
4498 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4499 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4500 		    ARCMSR_MESSAGE_SET_POST_WINDOW);
4501 
4502 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
4503 			cmn_err(CE_WARN,
4504 			    "arcmsr%d: timeout setting post command "
4505 			    "queue window",
4506 			    ddi_get_instance(acb->dev_info));
4507 			return (FALSE);
4508 		}
4509 
4510 		post_queue_phyaddr = ccb_phyaddr +
4511 		    ARCMSR_MAX_FREECCB_NUM *
4512 		    sizeof (struct CCB) +
4513 		    ARCOFFSET(struct HBB_msgUnit, post_qbuffer);
4514 		/* driver "set config" signature */
4515 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4516 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
4517 		    ARCMSR_SIGNATURE_SET_CONFIG);
4518 		/* normally this is zero */
4519 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4520 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
4521 		    ccb_phyaddr_hi32);
4522 		/* postQ size (256+8)*4 */
4523 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4524 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
4525 		    post_queue_phyaddr);
4526 		/* doneQ size (256+8)*4 */
4527 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4528 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
4529 		    post_queue_phyaddr+1056);
4530 		/* CCB max queue size must be (256+8)*4 */
4531 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4532 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
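		/*
		 * The 1056 above is (256 + 8) entries of 4 bytes each, so
		 * the done queue window set in rwbuffer[3] starts exactly
		 * one queue's worth of bytes past the post queue window.
		 */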
4533 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4534 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4535 		    ARCMSR_MESSAGE_SET_CONFIG);
4536 
4537 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
4538 			cmn_err(CE_WARN,
4539 			    "arcmsr%d: timeout setting command queue window",
4540 			    ddi_get_instance(acb->dev_info));
4541 			return (FALSE);
4542 		}
4543 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4544 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4545 		    ARCMSR_MESSAGE_START_DRIVER_MODE);
4546 
4547 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
4548 			cmn_err(CE_WARN,
4549 			    "arcmsr%d: timeout in 'start driver mode'",
4550 			    ddi_get_instance(acb->dev_info));
4551 			return (FALSE);
4552 		}
4553 	}
4554 	break;
4555 	}
4556 	return (TRUE);
4557 }
4558 
4559 
4560 /*
4561  * ONLY used for Adapter type B
4562  */
4563 static void
4564 arcmsr_enable_eoi_mode(struct ACB *acb) {
4565 
4566 	struct HBB_msgUnit *phbbmu;
4567 
4568 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4569 
4570 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4571 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4572 	    ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
4573 
4574 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4575 		cmn_err(CE_WARN,
4576 		    "arcmsr%d (Adapter type B): "
4577 		    "'iop enable eoi mode' timeout",
4578 		    ddi_get_instance(acb->dev_info));
4579 
4580 }
4581 
4582 /* initialize the IOP and start background rebuild */
4583 static void
4584 arcmsr_iop_init(struct ACB *acb) {
4585 
4586 	uint32_t intmask_org;
4587 
4588 	/* disable all outbound interrupt */
4589 	intmask_org = arcmsr_disable_allintr(acb);
4590 	arcmsr_wait_firmware_ready(acb);
4591 	(void) arcmsr_iop_confirm(acb);
4592 
4593 	/* start background rebuild */
4594 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
4595 		arcmsr_get_hba_config(acb);
4596 		arcmsr_start_hba_bgrb(acb);
4597 	} else {
4598 		arcmsr_get_hbb_config(acb);
4599 		arcmsr_start_hbb_bgrb(acb);
4600 	}
4601 
4602 	/* empty the doorbell Qbuffer if the doorbell rang */
4603 	arcmsr_clear_doorbell_queue_buffer(acb);
4604 
4605 	if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
4606 		arcmsr_enable_eoi_mode(acb);
4607 
4608 	/* enable outbound Post Queue, outbound doorbell Interrupt */
4609 	arcmsr_enable_allintr(acb, intmask_org);
4610 	acb->acb_flags |= ACB_F_IOP_INITED;
4611 }
4612 
4613 
4614 static int
4615 arcmsr_initialize(struct ACB *acb) {
4616 
4617 	struct CCB *pccb_tmp;
4618 	size_t allocated_length;
4619 	uint16_t wval;
4620 	uint32_t wlval;
4621 	uint_t intmask_org, count;
4622 	caddr_t	arcmsr_ccbs_area;
4623 	unsigned long ccb_phyaddr;
4624 	int32_t dma_sync_size;
4625 	int i, id, lun;
4626 
4627 	acb->irq = pci_config_get8(acb->pci_acc_handle,
4628 	    ARCMSR_PCI2PCI_PRIMARY_INTERRUPT_LINE_REG);
4629 	wlval = pci_config_get32(acb->pci_acc_handle, 0);
4630 	wval = (uint16_t)((wlval >> 16) & 0xffff);
4631 
4632 	if (wval == PCI_DEVICE_ID_ARECA_1201) {
4633 		uint32_t *iop_mu_regs_map0;
4634 		uint32_t *iop_mu_regs_map1;
4635 		struct CCB *freeccb;
4636 		struct HBB_msgUnit *phbbmu;
4637 
4638 		acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
4639 		dma_sync_size = (ARCMSR_MAX_FREECCB_NUM *
4640 		    sizeof (struct CCB) + 0x20) +
4641 		    sizeof (struct HBB_msgUnit);
4642 
4643 
4644 		/* Allocate memory for the ccb */
4645 		if ((i = ddi_dma_alloc_handle(acb->dev_info,
4646 		    &arcmsr_ccb_attr, DDI_DMA_SLEEP, NULL,
4647 		    &acb->ccbs_pool_handle)) != DDI_SUCCESS) {
4648 			switch (i) {
4649 			case DDI_DMA_BADATTR:
4650 				cmn_err(CE_WARN,
4651 				    "arcmsr%d: ddi_dma_alloc_handle got "
4652 				    "DDI_DMA_BADATTR",
4653 				    ddi_get_instance(acb->dev_info));
4654 				return (DDI_FAILURE);
4655 
4656 			case DDI_DMA_NORESOURCES:
4657 				cmn_err(CE_WARN, "arcmsr%d: "
4658 				    "ddi_dma_alloc_handle got "
4659 				    "DDI_DMA_NORESOURCES",
4660 				    ddi_get_instance(acb->dev_info));
4661 				return (DDI_FAILURE);
4662 			}
4663 			cmn_err(CE_WARN,
4664 			    "arcmsr%d: ddi_dma_alloc_handle got DDI_FAILURE",
4665 			    ddi_get_instance(acb->dev_info));
4666 			return (DDI_FAILURE);
4667 		}
4668 
4669 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
4670 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4671 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
4672 		    &allocated_length, &acb->ccbs_acc_handle)
4673 		    != DDI_SUCCESS) {
4674 			cmn_err(CE_WARN,
4675 			    "arcmsr%d: ddi_dma_mem_alloc failed",
4676 			    ddi_get_instance(acb->dev_info));
4677 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4678 			return (DDI_FAILURE);
4679 		}
4680 
4681 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
4682 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size,
4683 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
4684 		    NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
4685 			cmn_err(CE_WARN,
4686 			    "arcmsr%d: ddi_dma_addr_bind_handle failed",
4687 			    ddi_get_instance(acb->dev_info));
4688 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
4689 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4690 			return (DDI_FAILURE);
4691 		}
4692 		bzero(arcmsr_ccbs_area, dma_sync_size);
4693 		freeccb = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
4694 		acb->pmu = (struct msgUnit *)
4695 		    &freeccb[ARCMSR_MAX_FREECCB_NUM];
4696 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4697 
4698 		/* setup device register */
4699 		if (ddi_regs_map_setup(acb->dev_info, 1,
4700 		    (caddr_t *)&iop_mu_regs_map0, 0,
4701 		    sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
4702 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
4703 			arcmsr_log(NULL, CE_WARN,
4704 			    "arcmsr%d: unable to map PCI device "
4705 			    "base0 address registers",
4706 			    ddi_get_instance(acb->dev_info));
4707 			return (DDI_FAILURE);
4708 		}
4709 
4710 		/* ARCMSR_DRV2IOP_DOORBELL */
4711 		phbbmu->hbb_doorbell =
4712 		    (struct HBB_DOORBELL *)iop_mu_regs_map0;
4713 		if (ddi_regs_map_setup(acb->dev_info, 2,
4714 		    (caddr_t *)&iop_mu_regs_map1, 0,
4715 		    sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
4716 		    &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
4717 			arcmsr_log(NULL, CE_WARN,
4718 			    "arcmsr%d: unable to map PCI device "
4719 			    "base1 address registers",
4720 			    ddi_get_instance(acb->dev_info));
4721 			return (DDI_FAILURE);
4722 		}
4723 
4724 		/* ARCMSR_MSGCODE_RWBUFFER */
4725 		phbbmu->hbb_rwbuffer =
4726 		    (struct HBB_RWBUFFER *)iop_mu_regs_map1;
4727 	} else {
4728 		uint32_t *iop_mu_regs_map0;
4729 
4730 		acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
4731 		dma_sync_size = ARCMSR_MAX_FREECCB_NUM *
4732 		    sizeof (struct CCB) + 0x20;
4733 		if (ddi_regs_map_setup(acb->dev_info, 1,
4734 		    (caddr_t *)&iop_mu_regs_map0, 0,
4735 		    sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
4736 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
4737 			arcmsr_log(NULL, CE_WARN,
4738 			    "arcmsr%d: unable to map registers",
4739 			    ddi_get_instance(acb->dev_info));
4740 			return (DDI_FAILURE);
4741 		}
4742 
4743 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
4744 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
4745 		    DDI_SUCCESS) {
4746 			switch (i) {
4747 			case DDI_DMA_BADATTR:
4748 				cmn_err(CE_WARN,
4749 				    "arcmsr%d: ddi_dma_alloc_handle "
4750 				    "got DDI_DMA_BADATTR",
4751 				    ddi_get_instance(acb->dev_info));
4752 				return (DDI_FAILURE);
4753 			case DDI_DMA_NORESOURCES:
4754 				cmn_err(CE_WARN, "arcmsr%d: "
4755 				    "ddi_dma_alloc_handle got "
4756 				    "DDI_DMA_NORESOURCES",
4757 				    ddi_get_instance(acb->dev_info));
4758 				return (DDI_FAILURE);
4759 			}
4760 			cmn_err(CE_WARN,
4761 			    "arcmsr%d: ddi_dma_alloc_handle failed",
4762 			    ddi_get_instance(acb->dev_info));
4763 			return (DDI_FAILURE);
4764 		}
4765 
4766 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
4767 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4768 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
4769 		    &allocated_length, &acb->ccbs_acc_handle)
4770 		    != DDI_SUCCESS) {
4771 			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_mem_alloc failed",
4772 			    ddi_get_instance(acb->dev_info));
4773 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4774 			return (DDI_FAILURE);
4775 		}
4776 
4777 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
4778 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
4779 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
4780 		    &count) != DDI_DMA_MAPPED) {
4781 			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_addr_bind_handle "
4782 			    "failed",
4783 			    ddi_get_instance(acb->dev_info));
4784 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
4785 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4786 			return (DDI_FAILURE);
4787 		}
4788 		bzero(arcmsr_ccbs_area, dma_sync_size);
4789 		/* ioport base */
4790 		acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
4791 	}
4792 
4793 	/* from here on we cannot access PCI configuration space again */
4794 	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4795 	    ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
4796 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
4797 	/* physical address of acb->pccb_pool */
4798 	ccb_phyaddr = acb->ccb_cookie.dmac_address;
4799 
4800 	if (((unsigned long)arcmsr_ccbs_area & 0x1F) != 0) {
4801 		/* the CCB address must lie on a 32-byte (0x20) boundary */
4802 		arcmsr_ccbs_area = (caddr_t)((unsigned long)arcmsr_ccbs_area +
4803 		    (0x20 - ((unsigned long)arcmsr_ccbs_area & 0x1F)));
4804 		ccb_phyaddr = (unsigned long)ccb_phyaddr +
4805 		    (0x20 - ((unsigned long)ccb_phyaddr & 0x1F));
4806 	}
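	/*
	 * Example: had the pool been allocated at an address ending in
	 * 0x08, both the virtual and physical addresses advance by 0x18
	 * to the next 0x20 boundary; the extra 0x20 bytes included in
	 * dma_sync_size guarantee room for this adjustment.
	 */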
4807 
4808 	pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
4809 
4810 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
4811 		pccb_tmp->cdb_shifted_phyaddr = ccb_phyaddr >> 5;
4812 		pccb_tmp->acb = acb;
4813 		acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
4814 		ccb_phyaddr = ccb_phyaddr + sizeof (struct CCB);
4815 		pccb_tmp++;
4816 	}
4817 
4818 	acb->vir2phy_offset = (unsigned long)pccb_tmp -
4819 	    (unsigned long)ccb_phyaddr;
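	/*
	 * pccb_tmp and ccb_phyaddr advanced in lockstep through the loop,
	 * so their difference is the constant virtual-to-physical offset
	 * of the CCB pool; the ISRs add it to a completed command's
	 * physical address to recover the CCB pointer.
	 */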
4820 
4821 	/* disable all outbound interrupt */
4822 	intmask_org = arcmsr_disable_allintr(acb);
4823 
4824 	if (!arcmsr_iop_confirm(acb)) {
4825 		cmn_err(CE_WARN, "arcmsr%d: arcmsr_iop_confirm error",
4826 		    ddi_get_instance(acb->dev_info));
4827 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
4828 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
4829 		return (DDI_FAILURE);
4830 	}
4831 
4832 	for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
4833 		for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
4834 			acb->devstate[id][lun] = ARECA_RAID_GONE;
4835 		}
4836 	}
4837 
4838 	/* enable outbound Post Queue, outbound doorbell Interrupt */
4839 	arcmsr_enable_allintr(acb, intmask_org);
4840 
4841 	return (0);
4842 }
4843 
4844 /*
4845  * Autoconfiguration support
4846  */
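/*
 * A sketch of the name format arcmsr_parse_devname() accepts, using a
 * hypothetical input: for devnm = "sd@2,1:a", the text before '@' is the
 * node name, "2,1" is the hex target,lun address, and the ":a" minor
 * suffix is cut off, yielding *tgt = 2 and *lun = 1.
 */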
4847 static int
4848 arcmsr_parse_devname(char *devnm, int *tgt, int *lun)
4849 {
4850 	char devbuf[SCSI_MAXNAMELEN];
4851 	char *addr;
4852 	char *p,  *tp, *lp;
4853 	long num;
4854 
4855 	/* Parse dev name and address */
4856 	(void) strlcpy(devbuf, devnm, sizeof (devbuf));
4857 	addr = "";
4858 	for (p = devbuf; *p != '\0'; p++) {
4859 		if (*p == '@') {
4860 			addr = p + 1;
4861 			*p = '\0';
4862 		} else if (*p == ':') {
4863 			*p = '\0';
4864 			break;
4865 		}
4866 	}
4867 
4868 	/* Parse target and lun */
4869 	for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
4870 		if (*p == ',') {
4871 			lp = p + 1;
4872 			*p = '\0';
4873 			break;
4874 		}
4875 	}
4876 	if (tgt && tp) {
4877 		if (ddi_strtol(tp, NULL, 0x10, &num))
4878 			return (-1);
4879 		*tgt = (int)num;
4880 	}
4881 	if (lun && lp) {
4882 		if (ddi_strtol(lp, NULL, 0x10, &num))
4883 			return (-1);
4884 		*lun = (int)num;
4885 	}
4886 	return (0);
4887 }
4888 
4889 static int
4890 arcmsr_name_node(dev_info_t *dip, char *name, int len)
4891 {
4892 	int tgt, lun;
4893 
4894 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4895 	    DDI_PROP_DONTPASS, "target", -1);
4896 	if (tgt == -1)
4897 		return (DDI_FAILURE);
4898 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4899 	    DDI_PROP_DONTPASS, "lun", -1);
4900 	if (lun == -1)
4901 		return (DDI_FAILURE);
4902 
4903 	(void) snprintf(name, len, "%x,%x", tgt, lun);
4904 	return (DDI_SUCCESS);
4905 }
4906 
4907 static dev_info_t *
4908 arcmsr_find_child(struct ACB *acb, uint16_t tgt, uint8_t lun)
4909 {
4910 	dev_info_t *child = NULL;
4911 	char addr[SCSI_MAXNAMELEN];
4912 	char tmp[MAXNAMELEN];
4913 
4914 	(void) snprintf(addr, sizeof (addr), "%x,%x", tgt, lun);
4915 	for (child = ddi_get_child(acb->dev_info);
4916 	    child; child = ddi_get_next_sibling(child)) {
4917 		/* We don't care about non-persistent nodes */
4918 		if (ndi_dev_is_persistent_node(child) == 0)
4919 			continue;
4920 
4921 		if (arcmsr_name_node(child, tmp, MAXNAMELEN) !=
4922 		    DDI_SUCCESS)
4923 			continue;
4924 		if (strcmp(addr, tmp) == 0)
4925 			break;
4926 	}
4927 	return (child);
4928 }
4929 
4930 static int
4931 arcmsr_config_child(struct ACB *acb, struct scsi_device *sd,
4932     dev_info_t **dipp)
4933 {
4934 	char *nodename = NULL;
4935 	char **compatible = NULL;
4936 	int ncompatible = 0;
4937 	dev_info_t *ldip = NULL;
4938 	int tgt = sd->sd_address.a_target;
4939 	int lun = sd->sd_address.a_lun;
4940 	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
4941 	int rval;
4942 
4943 	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
4944 	    NULL, &nodename, &compatible, &ncompatible);
4945 	if (nodename == NULL) {
4946 		cmn_err(CE_WARN,
4947 		    "found no compatible driver for T%dL%d", tgt, lun);
4948 		rval = NDI_FAILURE;
4949 		goto finish;
4950 	}
4951 
4952 	/* Create dev node */
4953 	rval = ndi_devi_alloc(acb->dev_info, nodename, DEVI_SID_NODEID,
4954 	    &ldip);
4955 	if (rval == NDI_SUCCESS) {
4956 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
4957 		    != DDI_PROP_SUCCESS) {
4958 			cmn_err(CE_WARN, "arcmsr%d: unable to create "
4959 			    "property for T%dL%d (target)",
4960 			    ddi_get_instance(acb->dev_info), tgt, lun);
4961 			rval = NDI_FAILURE;
4962 			goto finish;
4963 		}
4964 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
4965 		    != DDI_PROP_SUCCESS) {
4966 			cmn_err(CE_WARN, "arcmsr%d: unable to create "
4967 			    "property for T%dL%d (lun)",
4968 			    ddi_get_instance(acb->dev_info), tgt, lun);
4969 			rval = NDI_FAILURE;
4970 			goto finish;
4971 		}
4972 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
4973 		    "compatible", compatible, ncompatible)
4974 		    != DDI_PROP_SUCCESS) {
4975 			cmn_err(CE_WARN, "arcmsr%d: unable to create "
4976 			    "property for T%dL%d (compatible)",
4977 			    ddi_get_instance(acb->dev_info), tgt, lun);
4978 			rval = NDI_FAILURE;
4979 			goto finish;
4980 		}
4981 
4982 		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
4983 		if (rval != NDI_SUCCESS) {
4984 			cmn_err(CE_WARN, "arcmsr%d: unable to online T%dL%d",
4985 			    ddi_get_instance(acb->dev_info), tgt, lun);
4986 			ndi_prop_remove_all(ldip);
4987 			(void) ndi_devi_free(ldip);
4988 		} else
4989 			cmn_err(CE_NOTE, "arcmsr%d: T%dL%d onlined",
4990 			    ddi_get_instance(acb->dev_info), tgt, lun);
4991 	}
4992 finish:
4993 	if (dipp)
4994 		*dipp = ldip;
4995 
4996 	scsi_hba_nodename_compatible_free(nodename, compatible);
4997 	return (rval);
4998 }
4999 
5000 static int
5001 arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
5002     dev_info_t **ldip)
5003 {
5004 	struct scsi_device sd;
5005 	dev_info_t *child;
5006 	int rval;
5007 
5008 	if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
5009 		if (ldip)
5010 			*ldip = child;
5011 		return (NDI_SUCCESS);
5012 	}
5013 
5014 	bzero(&sd, sizeof (struct scsi_device));
5015 	sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
5016 	sd.sd_address.a_target = (uint16_t)tgt;
5017 	sd.sd_address.a_lun = (uint8_t)lun;
5018 	rval = scsi_hba_probe(&sd, NULL);
5019 	if (rval == SCSIPROBE_EXISTS)
5020 		rval = arcmsr_config_child(acb, &sd, ldip);
5021 	scsi_unprobe(&sd);
5022 	return (rval);
5023 }
5024 
5025 static int
5026 arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op,
5027     void *arg, dev_info_t **childp)
5028 {
5029 	struct ACB *acb;
5030 	int circ = 0;
5031 	int rval;
5032 	int tgt, lun;
5033 	if ((acb = ddi_get_soft_state(arcmsr_soft_state,
5034 	    ddi_get_instance(parent))) == NULL)
5035 		return (NDI_FAILURE);
5036 
5037 	ndi_devi_enter(parent, &circ);
5038 	switch (op) {
5039 	case BUS_CONFIG_ONE:
5040 		if (arcmsr_parse_devname(arg, &tgt, &lun) != 0) {
5041 			rval = NDI_FAILURE;
5042 			break;
5043 		}
5044 		mutex_enter(&acb->acb_mutex);
5045 		if (acb->device_map[tgt] & 1 << lun) {
5046 			rval = arcmsr_config_lun(acb, tgt, lun, childp);
		} else {
			rval = NDI_FAILURE;
5047 		}
5048 		mutex_exit(&acb->acb_mutex);
5049 		break;
5050 
5051 	case BUS_CONFIG_DRIVER:
5052 	case BUS_CONFIG_ALL:
5053 		for (tgt = 0; tgt < ARCMSR_MAX_TARGETID; tgt++)
5054 			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
5055 				if (acb->device_map[tgt] & 1 << lun)
5056 					(void) arcmsr_config_lun(acb, tgt,
5057 					    lun, NULL);
5058 
5059 		rval = NDI_SUCCESS;
5060 		break;

	default:
		rval = NDI_FAILURE;
		break;
5061 	}
5062 	if (rval == NDI_SUCCESS)
5063 		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
5064 	ndi_devi_exit(parent, circ);
5065 	return (rval);
5066 }
5067