xref: /illumos-gate/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c (revision 861a91627796c35220e75654dac61e5707536dcd)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  *
29  * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
30  * based chipsets.
31  *
32  * NCQ
33  * ---
34  *
35  * A portion of the NCQ is in place, but is incomplete.  NCQ is disabled
36  * and is likely to be revisited in the future.
37  *
38  *
39  * Power Management
40  * ----------------
41  *
42  * Normally power management would be responsible for ensuring the device
43  * is quiescent and then changing power states to the device, such as
44  * powering down parts or all of the device.  mcp5x/ck804 is unique in
45  * that it is only available as part of a larger southbridge chipset, so
46  * removing power to the device isn't possible.  Switches to control
47  * power management states D0/D3 in the PCI configuration space appear to
 * be supported but changes to these states apparently are ignored.
49  * The only further PM that the driver _could_ do is shut down the PHY,
50  * but in order to deliver the first rev of the driver sooner than later,
51  * that will be deferred until some future phase.
52  *
53  * Since the driver currently will not directly change any power state to
54  * the device, no power() entry point will be required.  However, it is
55  * possible that in ACPI power state S3, aka suspend to RAM, that power
56  * can be removed to the device, and the driver cannot rely on BIOS to
 * have reset any state.  For the time being, there are no known
 * non-default configurations that need to be programmed.  This judgement
59  * is based on the port of the legacy ata driver not having any such
60  * functionality and based on conversations with the PM team.  If such a
61  * restoration is later deemed necessary it can be incorporated into the
62  * DDI_RESUME processing.
63  *
64  */
65 
66 #include <sys/scsi/scsi.h>
67 #include <sys/pci.h>
68 #include <sys/byteorder.h>
69 #include <sys/sunddi.h>
70 #include <sys/sata/sata_hba.h>
71 #ifdef SGPIO_SUPPORT
72 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
73 #include <sys/devctl.h>
74 #include <sys/sdt.h>
75 #endif
76 #include <sys/sata/adapters/nv_sata/nv_sata.h>
77 #include <sys/disp.h>
78 #include <sys/note.h>
79 #include <sys/promif.h>
80 
81 
82 /*
83  * Function prototypes for driver entry points
84  */
85 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
86 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
87 static int nv_quiesce(dev_info_t *dip);
88 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
89     void *arg, void **result);
90 
91 /*
92  * Function prototypes for entry points from sata service module
93  * These functions are distinguished from other local functions
94  * by the prefix "nv_sata_"
95  */
96 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
97 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
98 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
99 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
100 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
101 
102 /*
103  * Local function prototypes
104  */
105 static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
106 static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
107 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
108 #ifdef NV_MSI_SUPPORTED
109 static int nv_add_msi_intrs(nv_ctl_t *nvc);
110 #endif
111 static void nv_rem_intrs(nv_ctl_t *nvc);
112 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
113 static int nv_start_nodata(nv_port_t *nvp, int slot);
114 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
115 static int nv_start_pio_in(nv_port_t *nvp, int slot);
116 static int nv_start_pio_out(nv_port_t *nvp, int slot);
117 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
118 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
119 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
120 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
121 static int nv_start_dma(nv_port_t *nvp, int slot);
122 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
123 static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
124 static void nv_uninit_ctl(nv_ctl_t *nvc);
125 static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
126 static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
127 static void nv_uninit_port(nv_port_t *nvp);
128 static int nv_init_port(nv_port_t *nvp);
129 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
130 static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131 #ifdef NCQ
132 static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
133 #endif
134 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
135 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
136     int state);
137 static void nv_common_reg_init(nv_ctl_t *nvc);
138 static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
139 static void nv_reset(nv_port_t *nvp);
140 static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
141 static void nv_timeout(void *);
142 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
143 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
144 static void nv_read_signature(nv_port_t *nvp);
145 static void mcp5x_set_intr(nv_port_t *nvp, int flag);
146 static void ck804_set_intr(nv_port_t *nvp, int flag);
147 static void nv_resume(nv_port_t *nvp);
148 static void nv_suspend(nv_port_t *nvp);
149 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
150 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
151     int flag);
152 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
153     sata_pkt_t *spkt);
154 static void nv_report_add_remove(nv_port_t *nvp, int flags);
155 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
156 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
157     uchar_t failure_onbits2, uchar_t failure_offbits2,
158     uchar_t failure_onbits3, uchar_t failure_offbits3,
159     uint_t timeout_usec, int type_wait);
160 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
161     uint_t timeout_usec, int type_wait);
162 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
163 static void nv_init_port_link_processing(nv_ctl_t *nvc);
164 static void nv_setup_timeout(nv_port_t *nvp, int time);
165 static void nv_monitor_reset(nv_port_t *nvp);
166 static int nv_bm_status_clear(nv_port_t *nvp);
167 
168 #ifdef SGPIO_SUPPORT
169 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
170 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
171 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
172     cred_t *credp, int *rvalp);
173 
174 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
175 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
176     uint32_t *cbpp);
177 static int nv_sgp_init(nv_ctl_t *nvc);
178 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
179 static int nv_sgp_csr_read(nv_ctl_t *nvc);
180 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
181 static int nv_sgp_write_data(nv_ctl_t *nvc);
182 static void nv_sgp_activity_led_ctl(void *arg);
183 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
184 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
185 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
186 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
187 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
188 static void nv_sgp_cleanup(nv_ctl_t *nvc);
189 #endif
190 
191 
192 /*
193  * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
194  * Verify if needed if ported to other ISA.
195  */
static ddi_dma_attr_t buffer_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo: lowest bus address */
	0xffffffffull,		/* dma_attr_addr_hi: 32-bit address limit */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
	4,			/* dma_attr_align */
	1,			/* dma_attr_burstsizes. */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
	0xffffffffull,		/* dma_attr_seg */
	NV_DMA_NSEGS,		/* dma_attr_sgllen */
	512,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};
/*
 * Identical to buffer_dma_attr except for the 40-bit high address limit.
 * Selection between the two presumably depends on nv_sata_40bit_dma —
 * confirm at the point where buffer DMA is bound (not visible here).
 */
static ddi_dma_attr_t buffer_dma_40bit_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo: lowest bus address */
	0xffffffffffull,	/* dma_attr_addr_hi: 40-bit address limit */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
	4,			/* dma_attr_align */
	1,			/* dma_attr_burstsizes. */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
	0xffffffffull,		/* dma_attr_seg */
	NV_DMA_NSEGS,		/* dma_attr_sgllen */
	512,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};
224 
225 
226 /*
227  * DMA attributes for PRD tables
228  */
ddi_dma_attr_t nv_prd_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi: 32-bit address limit */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
	4,			/* dma_attr_align */
	1,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer: table fits in 64K */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg: may not cross 64K boundary */
	1,			/* dma_attr_sgllen: single cookie only */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
243 
244 /*
245  * Device access attributes
246  */
static ddi_device_acc_attr_t accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,	/* device registers are little-endian */
    DDI_STRICTORDER_ACC		/* accesses must not be reordered/merged */
};
252 
253 
254 #ifdef SGPIO_SUPPORT
/*
 * Character device entry points.  Only open/close/ioctl are implemented;
 * they provide the SGPIO LED control interface (DEVCTL_SET_LED /
 * DEVCTL_GET_LED, see nv_ioctl()).
 */
static struct cb_ops nv_cb_ops = {
	nv_open,		/* open */
	nv_close,		/* close */
	nodev,			/* strategy (block) */
	nodev,			/* print (block) */
	nodev,			/* dump (block) */
	nodev,			/* read */
	nodev,			/* write */
	nv_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* streams */
	D_NEW | D_MP |
	D_64BIT | D_HOTPLUG,	/* flags */
	CB_REV			/* rev */
};
274 #endif  /* SGPIO_SUPPORT */
275 
276 
/*
 * Device operations vector.  cb_ops is only provided when SGPIO LED
 * support is compiled in; otherwise there is no character device.
 */
static struct dev_ops nv_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt  */
	nv_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	nv_attach,		/* attach */
	nv_detach,		/* detach */
	nodev,			/* no reset */
#ifdef SGPIO_SUPPORT
	&nv_cb_ops,		/* driver operations */
#else
	(struct cb_ops *)0,	/* driver operations */
#endif
	NULL,			/* bus operations */
	NULL,			/* power */
	nv_quiesce		/* quiesce */
};
295 
296 
297 /*
298  * Request Sense CDB for ATAPI
299  */
static const uint8_t nv_rqsense_cdb[16] = {
	SCMD_REQUEST_SENSE,		/* byte 0: opcode */
	0,
	0,
	0,
	SATA_ATAPI_MIN_RQSENSE_LEN,	/* byte 4: allocation length */
	0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
};
309 
310 
311 static sata_tran_hotplug_ops_t nv_hotplug_ops;
312 
313 extern struct mod_ops mod_driverops;
314 
static  struct modldrv modldrv = {
	&mod_driverops,	/* driverops */
	"Nvidia ck804/mcp51/mcp55 HBA",	/* short module description */
	&nv_dev_ops,	/* driver ops */
};

/* Single-driver module linkage passed to mod_install()/mod_remove() */
static  struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
326 
327 
328 /*
329  * Wait for a signature.
330  * If this variable is non-zero, the driver will wait for a device signature
331  * before reporting a device reset to the sata module.
332  * Some (most?) drives will not process commands sent to them before D2H FIS
333  * is sent to a host.
334  */
int nv_wait_for_signature = 1;

/*
 * Check for signature availability.
 * If this variable is non-zero, the driver will check the task file error
 * register for an indication of signature availability before reading a
 * signature.
 * Task file error register bit 0 set to 1 indicates that the drive
 * is ready and it has sent the D2H FIS with a signature.
 * This behavior of the error register is not reliable in the mcp5x
 * controller.
 */
int nv_check_tfr_error = 0;

/*
 * Max signature acquisition time, in milliseconds.
 * The driver will try to acquire a device signature within the specified
 * time and quit the acquisition operation if the signature was not acquired.
 */
long nv_sig_acquisition_time = NV_SIG_ACQUISITION_TIME;

/*
 * If this variable is non-zero, the driver will wait for a signature in the
 * nv_monitor_reset function without any time limit.
 * Used for debugging and drive evaluation.
 */
int nv_wait_here_forever = 0;

/*
 * Reset after hotplug.
 * If this variable is non-zero, the driver will reset the device after a
 * hotplug (device attached) interrupt.
 * If the variable is zero, the driver will not reset the new device nor
 * will it try to read the device signature.
 * The chipset generates the hotplug (device attached) interrupt with a
 * delay, so the device should have already sent the D2H FIS with the
 * signature.
 */
int nv_reset_after_hotplug = 1;

/*
 * Delay after device hotplug.
 * It specifies the time between detecting a hotplugged device and sending
 * a notification to the SATA module.
 * It is used when the device is not reset after hotplugging and acquiring
 * the signature may be unreliable.  The delay should be long enough for a
 * device to become ready to accept commands.
 */
int nv_hotplug_delay = NV_HOTPLUG_DELAY;


/*
 * Maximum number of consecutive interrupts processed in the loop in the
 * single invocation of the port interrupt routine.
 */
int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;



/*
 * Wait (in microseconds) between checks of register status.
 */
int nv_usec_delay = NV_WAIT_REG_CHECK;

/*
 * Logging tunables; the log machinery itself (nv_vcmn_err()/nv_log())
 * is defined elsewhere in this file.
 */
static kmutex_t nv_log_mutex; /* protects nv_log_buf */
static char nv_log_buf[NV_STRING_512];
int nv_debug_flags = NVDBG_ALWAYS;
int nv_log_to_console = B_FALSE;

int nv_log_delay = 0;
int nv_prom_print = B_FALSE;

/*
 * Command counters, for debugging only.
 */
#ifdef DEBUG
int ncq_commands = 0;
int non_ncq_commands = 0;
#endif

/*
 * Opaque state pointer to be initialized by ddi_soft_state_init()
 */
static void *nv_statep	= NULL;
419 
420 /*
421  * Map from CBP to shared space
422  *
423  * When a MCP55/IO55 parts supports SGPIO, there is a single CBP (SGPIO
424  * Control Block Pointer as well as the corresponding Control Block) that
425  * is shared across all driver instances associated with that part.  The
426  * Control Block is used to update and query the LED state for the devices
427  * on the controllers associated with those instances.  There is also some
428  * driver state (called the 'common' area here) associated with each SGPIO
429  * Control Block.  The nv_sgp_cpb2cmn is used to map a given CBP to its
430  * control area.
431  *
432  * The driver can also use this mapping array to determine whether the
433  * common area for a given CBP has been initialized, and, if it isn't
434  * initialized, initialize it.
435  *
436  * When a driver instance with a CBP value that is already in the array is
437  * initialized, it will use the pointer to the previously initialized common
438  * area associated with that SGPIO CBP value, rather than initialize it
439  * itself.
440  *
441  * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
442  */
#ifdef SGPIO_SUPPORT
/* Protects nv_sgp_cbp2cmn[]; see the CBP mapping description above. */
static kmutex_t nv_sgp_c2c_mutex;
static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
#endif

/*
 * 40-bit DMA for data buffers is enabled by default.
 * NOTE(review): the previous comment here claimed 40-bit DMA was disabled
 * by default due to problems, but the value is B_TRUE (enabled).  Setting
 * this to B_FALSE presumably restricts buffer DMA to the 32-bit
 * buffer_dma_attr instead of buffer_dma_40bit_attr — confirm where the
 * DMA attributes are chosen.
 */
int nv_sata_40bit_dma = B_TRUE;

/* Hotplug entry points handed to the sata module (see cfgadm(1M) usage). */
static sata_tran_hotplug_ops_t nv_hotplug_ops = {
	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
	nv_sata_activate,	/* activate port. cfgadm -c connect */
	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
};
456 
457 
458 /*
459  *  nv module initialization
460  */
461 int
462 _init(void)
463 {
464 	int	error;
465 #ifdef SGPIO_SUPPORT
466 	int	i;
467 #endif
468 
469 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
470 
471 	if (error != 0) {
472 
473 		return (error);
474 	}
475 
476 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
477 #ifdef SGPIO_SUPPORT
478 	mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
479 
480 	for (i = 0; i < NV_MAX_CBPS; i++) {
481 		nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
482 		nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
483 	}
484 #endif
485 
486 	if ((error = sata_hba_init(&modlinkage)) != 0) {
487 		ddi_soft_state_fini(&nv_statep);
488 		mutex_destroy(&nv_log_mutex);
489 
490 		return (error);
491 	}
492 
493 	error = mod_install(&modlinkage);
494 	if (error != 0) {
495 		sata_hba_fini(&modlinkage);
496 		ddi_soft_state_fini(&nv_statep);
497 		mutex_destroy(&nv_log_mutex);
498 
499 		return (error);
500 	}
501 
502 	return (error);
503 }
504 
505 
506 /*
507  * nv module uninitialize
508  */
509 int
510 _fini(void)
511 {
512 	int	error;
513 
514 	error = mod_remove(&modlinkage);
515 
516 	if (error != 0) {
517 		return (error);
518 	}
519 
520 	/*
521 	 * remove the resources allocated in _init()
522 	 */
523 	mutex_destroy(&nv_log_mutex);
524 #ifdef SGPIO_SUPPORT
525 	mutex_destroy(&nv_sgp_c2c_mutex);
526 #endif
527 	sata_hba_fini(&modlinkage);
528 	ddi_soft_state_fini(&nv_statep);
529 
530 	return (error);
531 }
532 
533 
/*
 * nv _info entry point: report module information via mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
542 
543 
544 /*
545  * these wrappers for ddi_{get,put}8 are for observability
546  * with dtrace
547  */
#ifdef DEBUG

/*
 * In DEBUG builds each register access width gets its own wrapper
 * function so dtrace fbt probes can observe individual device accesses.
 * In non-DEBUG builds the wrappers collapse to the ddi_{get,put}*
 * routines directly (see the #else branch below).
 */
static void
nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
{
	ddi_put8(handle, dev_addr, value);
}

static void
nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
{
	ddi_put32(handle, dev_addr, value);
}

static uint32_t
nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
{
	return (ddi_get32(handle, dev_addr));
}

static void
nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
{
	ddi_put16(handle, dev_addr, value);
}

static uint16_t
nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
{
	return (ddi_get16(handle, dev_addr));
}

static uint8_t
nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
{
	return (ddi_get8(handle, dev_addr));
}

#else

#define	nv_put8 ddi_put8
#define	nv_put32 ddi_put32
#define	nv_get32 ddi_get32
#define	nv_put16 ddi_put16
#define	nv_get16 ddi_get16
#define	nv_get8 ddi_get8

#endif
596 
597 
598 /*
599  * Driver attach
600  */
601 static int
602 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
603 {
604 	int status, attach_state, intr_types, bar, i, command;
605 	int inst = ddi_get_instance(dip);
606 	ddi_acc_handle_t pci_conf_handle;
607 	nv_ctl_t *nvc;
608 	uint8_t subclass;
609 	uint32_t reg32;
610 #ifdef SGPIO_SUPPORT
611 	pci_regspec_t *regs;
612 	int rlen;
613 #endif
614 
615 	switch (cmd) {
616 
617 	case DDI_ATTACH:
618 
619 		NVLOG((NVDBG_INIT, NULL, NULL,
620 		    "nv_attach(): DDI_ATTACH inst %d", inst));
621 
622 		attach_state = ATTACH_PROGRESS_NONE;
623 
624 		status = ddi_soft_state_zalloc(nv_statep, inst);
625 
626 		if (status != DDI_SUCCESS) {
627 			break;
628 		}
629 
630 		nvc = ddi_get_soft_state(nv_statep, inst);
631 
632 		nvc->nvc_dip = dip;
633 
634 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
635 
636 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
637 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
638 			    PCI_CONF_REVID);
639 			NVLOG((NVDBG_INIT, NULL, NULL,
640 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
641 			    inst, nvc->nvc_revid, nv_debug_flags));
642 		} else {
643 			break;
644 		}
645 
646 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
647 
648 		/*
649 		 * Set the PCI command register: enable IO/MEM/Master.
650 		 */
651 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
652 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
653 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
654 
655 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
656 
657 		if (subclass & PCI_MASS_RAID) {
658 			cmn_err(CE_WARN,
659 			    "attach failed: RAID mode not supported");
660 			break;
661 		}
662 
663 		/*
664 		 * the 6 bars of the controller are:
665 		 * 0: port 0 task file
666 		 * 1: port 0 status
667 		 * 2: port 1 task file
668 		 * 3: port 1 status
669 		 * 4: bus master for both ports
670 		 * 5: extended registers for SATA features
671 		 */
672 		for (bar = 0; bar < 6; bar++) {
673 			status = ddi_regs_map_setup(dip, bar + 1,
674 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
675 			    &nvc->nvc_bar_hdl[bar]);
676 
677 			if (status != DDI_SUCCESS) {
678 				NVLOG((NVDBG_INIT, nvc, NULL,
679 				    "ddi_regs_map_setup failure for bar"
680 				    " %d status = %d", bar, status));
681 				break;
682 			}
683 		}
684 
685 		attach_state |= ATTACH_PROGRESS_BARS;
686 
687 		/*
688 		 * initialize controller structures
689 		 */
690 		status = nv_init_ctl(nvc, pci_conf_handle);
691 
692 		if (status == NV_FAILURE) {
693 			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
694 
695 			break;
696 		}
697 
698 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
699 
700 		/*
701 		 * initialize mutexes
702 		 */
703 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
704 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
705 
706 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
707 
708 		/*
709 		 * get supported interrupt types
710 		 */
711 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
712 		    DDI_SUCCESS) {
713 			nv_cmn_err(CE_WARN, nvc, NULL,
714 			    "!ddi_intr_get_supported_types failed");
715 			NVLOG((NVDBG_INIT, nvc, NULL,
716 			    "interrupt supported types failed"));
717 
718 			break;
719 		}
720 
721 		NVLOG((NVDBG_INIT, nvc, NULL,
722 		    "ddi_intr_get_supported_types() returned: 0x%x",
723 		    intr_types));
724 
725 #ifdef NV_MSI_SUPPORTED
726 		if (intr_types & DDI_INTR_TYPE_MSI) {
727 			NVLOG((NVDBG_INIT, nvc, NULL,
728 			    "using MSI interrupt type"));
729 
730 			/*
731 			 * Try MSI first, but fall back to legacy if MSI
732 			 * attach fails
733 			 */
734 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
735 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
736 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
737 				NVLOG((NVDBG_INIT, nvc, NULL,
738 				    "MSI interrupt setup done"));
739 			} else {
740 				nv_cmn_err(CE_CONT, nvc, NULL,
741 				    "!MSI registration failed "
742 				    "will try Legacy interrupts");
743 			}
744 		}
745 #endif
746 
747 		/*
748 		 * Either the MSI interrupt setup has failed or only
749 		 * the fixed interrupts are available on the system.
750 		 */
751 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
752 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
753 
754 			NVLOG((NVDBG_INIT, nvc, NULL,
755 			    "using Legacy interrupt type"));
756 
757 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
758 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
759 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
760 				NVLOG((NVDBG_INIT, nvc, NULL,
761 				    "Legacy interrupt setup done"));
762 			} else {
763 				nv_cmn_err(CE_WARN, nvc, NULL,
764 				    "!legacy interrupt setup failed");
765 				NVLOG((NVDBG_INIT, nvc, NULL,
766 				    "legacy interrupt setup failed"));
767 				break;
768 			}
769 		}
770 
771 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
772 			NVLOG((NVDBG_INIT, nvc, NULL,
773 			    "no interrupts registered"));
774 			break;
775 		}
776 
777 #ifdef SGPIO_SUPPORT
778 		/*
779 		 * save off the controller number
780 		 */
781 		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
782 		    "reg", (caddr_t)&regs, &rlen);
783 		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
784 		kmem_free(regs, rlen);
785 
786 		/*
787 		 * initialize SGPIO
788 		 */
789 		nv_sgp_led_init(nvc, pci_conf_handle);
790 #endif	/* SGPIO_SUPPORT */
791 
792 		/*
793 		 * Initiate link processing and device identification
794 		 */
795 		nv_init_port_link_processing(nvc);
796 		/*
797 		 * attach to sata module
798 		 */
799 		if (sata_hba_attach(nvc->nvc_dip,
800 		    &nvc->nvc_sata_hba_tran,
801 		    DDI_ATTACH) != DDI_SUCCESS) {
802 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
803 
804 			break;
805 		}
806 
807 		pci_config_teardown(&pci_conf_handle);
808 
809 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
810 
811 		return (DDI_SUCCESS);
812 
813 	case DDI_RESUME:
814 
815 		nvc = ddi_get_soft_state(nv_statep, inst);
816 
817 		NVLOG((NVDBG_INIT, nvc, NULL,
818 		    "nv_attach(): DDI_RESUME inst %d", inst));
819 
820 		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
821 			return (DDI_FAILURE);
822 		}
823 
824 		/*
825 		 * Set the PCI command register: enable IO/MEM/Master.
826 		 */
827 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
828 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
829 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
830 
831 		/*
832 		 * Need to set bit 2 to 1 at config offset 0x50
833 		 * to enable access to the bar5 registers.
834 		 */
835 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
836 
837 		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
838 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
839 			    reg32 | NV_BAR5_SPACE_EN);
840 		}
841 
842 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
843 
844 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
845 			nv_resume(&(nvc->nvc_port[i]));
846 		}
847 
848 		pci_config_teardown(&pci_conf_handle);
849 
850 		return (DDI_SUCCESS);
851 
852 	default:
853 		return (DDI_FAILURE);
854 	}
855 
856 
857 	/*
858 	 * DDI_ATTACH failure path starts here
859 	 */
860 
861 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
862 		nv_rem_intrs(nvc);
863 	}
864 
865 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
866 		/*
867 		 * Remove timers
868 		 */
869 		int port = 0;
870 		nv_port_t *nvp;
871 
872 		for (; port < NV_MAX_PORTS(nvc); port++) {
873 			nvp = &(nvc->nvc_port[port]);
874 			if (nvp->nvp_timeout_id != 0) {
875 				(void) untimeout(nvp->nvp_timeout_id);
876 			}
877 		}
878 	}
879 
880 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
881 		mutex_destroy(&nvc->nvc_mutex);
882 	}
883 
884 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
885 		nv_uninit_ctl(nvc);
886 	}
887 
888 	if (attach_state & ATTACH_PROGRESS_BARS) {
889 		while (--bar >= 0) {
890 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
891 		}
892 	}
893 
894 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
895 		ddi_soft_state_free(nv_statep, inst);
896 	}
897 
898 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
899 		pci_config_teardown(&pci_conf_handle);
900 	}
901 
902 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
903 
904 	return (DDI_FAILURE);
905 }
906 
907 
/*
 * Driver detach: full teardown on DDI_DETACH, quiesce of all ports on
 * DDI_SUSPEND.  The DDI_DETACH teardown order mirrors the reverse of
 * nv_attach()'s acquisition order (interrupts first, soft state last).
 */
static int
nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int i, port, inst = ddi_get_instance(dip);
	nv_ctl_t *nvc;
	nv_port_t *nvp;

	nvc = ddi_get_soft_state(nv_statep, inst);

	switch (cmd) {

	case DDI_DETACH:

		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));

		/*
		 * Remove interrupts first so no handler can fire while the
		 * rest of the state is dismantled.
		 */
		nv_rem_intrs(nvc);

		/*
		 * Remove (cancel) any outstanding per-port timers.
		 */
		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
			nvp = &(nvc->nvc_port[port]);
			if (nvp->nvp_timeout_id != 0) {
				(void) untimeout(nvp->nvp_timeout_id);
			}
		}

		/*
		 * Remove register mappings for all 6 bars.
		 */
		for (i = 0; i < 6; i++) {
			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
		}

		/*
		 * Destroy mutexes
		 */
		mutex_destroy(&nvc->nvc_mutex);

		/*
		 * Uninitialize the controller structures
		 */
		nv_uninit_ctl(nvc);

#ifdef SGPIO_SUPPORT
		/*
		 * release SGPIO resources
		 */
		nv_sgp_cleanup(nvc);
#endif

		/*
		 * unregister from the sata module
		 */
		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);

		/*
		 * Free soft state
		 */
		ddi_soft_state_free(nv_statep, inst);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:

		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));

		/* quiesce each port, then mark the controller suspended */
		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
			nv_suspend(&(nvc->nvc_port[i]));
		}

		nvc->nvc_state |= NV_CTRL_SUSPEND;

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
990 
991 
992 /*ARGSUSED*/
993 static int
994 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
995 {
996 	nv_ctl_t *nvc;
997 	int instance;
998 	dev_t dev;
999 
1000 	dev = (dev_t)arg;
1001 	instance = getminor(dev);
1002 
1003 	switch (infocmd) {
1004 	case DDI_INFO_DEVT2DEVINFO:
1005 		nvc = ddi_get_soft_state(nv_statep,  instance);
1006 		if (nvc != NULL) {
1007 			*result = nvc->nvc_dip;
1008 			return (DDI_SUCCESS);
1009 		} else {
1010 			*result = NULL;
1011 			return (DDI_FAILURE);
1012 		}
1013 	case DDI_INFO_DEVT2INSTANCE:
1014 		*(int *)result = instance;
1015 		break;
1016 	default:
1017 		break;
1018 	}
1019 	return (DDI_SUCCESS);
1020 }
1021 
1022 
1023 #ifdef SGPIO_SUPPORT
1024 /* ARGSUSED */
1025 static int
1026 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1027 {
1028 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1029 
1030 	if (nvc == NULL) {
1031 		return (ENXIO);
1032 	}
1033 
1034 	return (0);
1035 }
1036 
1037 
/* ARGSUSED */
static int
nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	/* nothing to release: nv_open() acquired no per-open resources */
	return (0);
}
1044 
1045 
/* ARGSUSED */
static int
nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	/*
	 * ioctl(9E) entry point implementing the DEVCTL LED commands that
	 * drive the SGPIO fail/locate LEDs: DEVCTL_SET_LED, DEVCTL_GET_LED
	 * and DEVCTL_NUM_LEDS.  Returns 0 on success or an errno value.
	 */
	nv_ctl_t *nvc;
	int inst;
	int status;
	int ctlr, port;
	int drive;
	uint8_t curr_led;
	struct dc_led_ctl led;

	inst = getminor(dev);
	if (inst == -1) {
		return (EBADF);
	}

	nvc = ddi_get_soft_state(nv_statep, inst);
	if (nvc == NULL) {
		return (EBADF);
	}

	/*
	 * LED control requires the SGPIO command block and common state to
	 * have been set up at attach time.
	 */
	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
		return (EIO);
	}

	switch (cmd) {
	case DEVCTL_SET_LED:
		status = ddi_copyin((void *)arg, &led,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		/*
		 * Since only the first two controllers currently support
		 * SGPIO (as per NVIDIA docs), this code does as well.
		 * Note that this also validates the drive value carried
		 * in led_number.
		 */

		ctlr = SGP_DRV_TO_CTLR(led.led_number);
		if ((ctlr != 0) && (ctlr != 1))
			return (ENXIO);

		/*
		 * Blinking does not work in practice on this hardware (see
		 * DEVCTL_NUM_LEDS below), so reject blink requests outright.
		 */
		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
			return (EINVAL);
		}

		drive = led.led_number;

		/*
		 * Turn the LED off when the control is inactive or the
		 * requested state is "off".
		 */
		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
		    (led.led_state == DCL_STATE_OFF)) {

			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
			} else {
				return (ENXIO);
			}

			/* record which LED types this ioctl has modified */
			port = SGP_DRV_TO_PORT(led.led_number);
			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
		}

		if (led.led_ctl_active == DCL_CNTRL_ON) {
			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
			} else {
				return (ENXIO);
			}

			port = SGP_DRV_TO_PORT(led.led_number);
			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
		}

		break;

	case DEVCTL_GET_LED:
		status = ddi_copyin((void *)arg, &led,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		/*
		 * Since only the first two controllers currently support
		 * SGPIO (as per NVIDIA docs), this code does as well.
		 * Note that this also validates the drive value carried
		 * in led_number.
		 */

		ctlr = SGP_DRV_TO_CTLR(led.led_number);
		if ((ctlr != 0) && (ctlr != 1))
			return (ENXIO);

		/* read the drive's current SGPIO transmit register state */
		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
		    led.led_number);

		port = SGP_DRV_TO_PORT(led.led_number);
		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
			led.led_ctl_active = DCL_CNTRL_ON;

			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
					led.led_state = DCL_STATE_OFF;
				else
					led.led_state = DCL_STATE_ON;
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
					led.led_state = DCL_STATE_OFF;
				else
					led.led_state = DCL_STATE_ON;
			} else {
				return (ENXIO);
			}
		} else {
			led.led_ctl_active = DCL_CNTRL_OFF;
			/*
			 * Not really off, but never set and no constant for
			 * tri-state
			 */
			led.led_state = DCL_STATE_OFF;
		}

		status = ddi_copyout(&led, (void *)arg,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		break;

	case DEVCTL_NUM_LEDS:
		led.led_number = SGPIO_DRV_CNT_VALUE;
		led.led_ctl_active = 1;
		led.led_type = 3;

		/*
		 * According to documentation, NVIDIA SGPIO is supposed to
		 * support blinking, but it does not seem to work in practice.
		 */
		led.led_state = DCL_STATE_ON;

		status = ddi_copyout(&led, (void *)arg,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		break;

	default:
		return (EINVAL);
	}

	return (0);
}
1204 #endif	/* SGPIO_SUPPORT */
1205 
1206 
1207 /*
1208  * Called by sata module to probe a port.  Port and device state
1209  * are not changed here... only reported back to the sata module.
1210  *
1211  */
1212 static int
1213 nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1214 {
1215 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1216 	uint8_t cport = sd->satadev_addr.cport;
1217 	uint8_t pmport = sd->satadev_addr.pmport;
1218 	uint8_t qual = sd->satadev_addr.qual;
1219 	nv_port_t *nvp;
1220 
1221 	if (cport >= NV_MAX_PORTS(nvc)) {
1222 		sd->satadev_type = SATA_DTYPE_NONE;
1223 		sd->satadev_state = SATA_STATE_UNKNOWN;
1224 
1225 		return (SATA_FAILURE);
1226 	}
1227 
1228 	ASSERT(nvc->nvc_port != NULL);
1229 	nvp = &(nvc->nvc_port[cport]);
1230 	ASSERT(nvp != NULL);
1231 
1232 	NVLOG((NVDBG_RESET, nvc, nvp,
1233 	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1234 	    "qual: 0x%x", cport, pmport, qual));
1235 
1236 	mutex_enter(&nvp->nvp_mutex);
1237 
1238 	/*
1239 	 * This check seems to be done in the SATA module.
1240 	 * It may not be required here
1241 	 */
1242 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1243 		nv_cmn_err(CE_WARN, nvc, nvp,
1244 		    "port inactive.  Use cfgadm to activate");
1245 		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1246 		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1247 		mutex_exit(&nvp->nvp_mutex);
1248 
1249 		return (SATA_SUCCESS);
1250 	}
1251 
1252 	if (nvp->nvp_state & NV_PORT_FAILED) {
1253 		NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
1254 		    "probe: port failed"));
1255 		sd->satadev_type = SATA_DTYPE_NONE;
1256 		sd->satadev_state = SATA_PSTATE_FAILED;
1257 		mutex_exit(&nvp->nvp_mutex);
1258 
1259 		return (SATA_SUCCESS);
1260 	}
1261 
1262 	if (qual == SATA_ADDR_PMPORT) {
1263 		sd->satadev_type = SATA_DTYPE_NONE;
1264 		sd->satadev_state = SATA_STATE_UNKNOWN;
1265 		mutex_exit(&nvp->nvp_mutex);
1266 		nv_cmn_err(CE_WARN, nvc, nvp,
1267 		    "controller does not support port multiplier");
1268 
1269 		return (SATA_SUCCESS);
1270 	}
1271 
1272 	sd->satadev_state = SATA_PSTATE_PWRON;
1273 
1274 	nv_copy_registers(nvp, sd, NULL);
1275 
1276 	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
1277 		/*
1278 		 * We are waiting for reset to complete and to fetch
1279 		 * a signature.
1280 		 * Reset will cause the link to go down for a short period of
1281 		 * time.  If reset processing continues for less than
1282 		 * NV_LINK_DOWN_TIMEOUT, fake the status of the link so that
1283 		 * we will not report intermittent link down.
1284 		 * Maybe we should report previous link state?
1285 		 */
1286 		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) <
1287 		    NV_LINK_DOWN_TIMEOUT) {
1288 			SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1289 			    SSTATUS_IPM_ACTIVE);
1290 			SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1291 			    SSTATUS_DET_DEVPRE_PHYCOM);
1292 			sd->satadev_type = nvp->nvp_type;
1293 			mutex_exit(&nvp->nvp_mutex);
1294 
1295 			return (SATA_SUCCESS);
1296 		}
1297 	}
1298 	/*
1299 	 * Just report the current port state
1300 	 */
1301 	sd->satadev_type = nvp->nvp_type;
1302 	sd->satadev_state = nvp->nvp_state | SATA_PSTATE_PWRON;
1303 	mutex_exit(&nvp->nvp_mutex);
1304 
1305 #ifdef SGPIO_SUPPORT
1306 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
1307 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1308 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1309 	} else {
1310 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1311 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1312 	}
1313 #endif
1314 
1315 	return (SATA_SUCCESS);
1316 }
1317 
1318 
1319 /*
1320  * Called by sata module to start a new command.
1321  */
1322 static int
1323 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1324 {
1325 	int cport = spkt->satapkt_device.satadev_addr.cport;
1326 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1327 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1328 	int ret;
1329 
1330 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1331 	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1332 
1333 	mutex_enter(&nvp->nvp_mutex);
1334 
1335 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1336 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1337 		NVLOG((NVDBG_ERRS, nvc, nvp,
1338 		    "nv_sata_start: port not yet initialized"));
1339 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1340 		mutex_exit(&nvp->nvp_mutex);
1341 
1342 		return (SATA_TRAN_PORT_ERROR);
1343 	}
1344 
1345 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1346 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1347 		NVLOG((NVDBG_ERRS, nvc, nvp,
1348 		    "nv_sata_start: NV_PORT_INACTIVE"));
1349 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1350 		mutex_exit(&nvp->nvp_mutex);
1351 
1352 		return (SATA_TRAN_PORT_ERROR);
1353 	}
1354 
1355 	if (nvp->nvp_state & NV_PORT_FAILED) {
1356 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1357 		NVLOG((NVDBG_ERRS, nvc, nvp,
1358 		    "nv_sata_start: NV_PORT_FAILED state"));
1359 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1360 		mutex_exit(&nvp->nvp_mutex);
1361 
1362 		return (SATA_TRAN_PORT_ERROR);
1363 	}
1364 
1365 	if (nvp->nvp_state & NV_PORT_RESET) {
1366 		NVLOG((NVDBG_VERBOSE, nvc, nvp,
1367 		    "still waiting for reset completion"));
1368 		spkt->satapkt_reason = SATA_PKT_BUSY;
1369 		mutex_exit(&nvp->nvp_mutex);
1370 
1371 		/*
1372 		 * If in panic, timeouts do not occur, so fake one
1373 		 * so that the signature can be acquired to complete
1374 		 * the reset handling.
1375 		 */
1376 		if (ddi_in_panic()) {
1377 			nv_timeout(nvp);
1378 		}
1379 
1380 		return (SATA_TRAN_BUSY);
1381 	}
1382 
1383 	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1384 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1385 		NVLOG((NVDBG_ERRS, nvc, nvp,
1386 		    "nv_sata_start: SATA_DTYPE_NONE"));
1387 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1388 		mutex_exit(&nvp->nvp_mutex);
1389 
1390 		return (SATA_TRAN_PORT_ERROR);
1391 	}
1392 
1393 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1394 		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1395 		nv_cmn_err(CE_WARN, nvc, nvp,
1396 		    "port multipliers not supported by controller");
1397 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1398 		mutex_exit(&nvp->nvp_mutex);
1399 
1400 		return (SATA_TRAN_CMD_UNSUPPORTED);
1401 	}
1402 
1403 	/*
1404 	 * after a device reset, and then when sata module restore processing
1405 	 * is complete, the sata module will set sata_clear_dev_reset which
1406 	 * indicates that restore processing has completed and normal
1407 	 * non-restore related commands should be processed.
1408 	 */
1409 	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1410 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1411 		NVLOG((NVDBG_RESET, nvc, nvp,
1412 		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1413 	}
1414 
1415 	/*
1416 	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1417 	 * only allow commands which restore device state.  The sata module
1418 	 * marks such commands with with sata_ignore_dev_reset.
1419 	 *
1420 	 * during coredump, nv_reset is called and but then the restore
1421 	 * doesn't happen.  For now, workaround by ignoring the wait for
1422 	 * restore if the system is panicing.
1423 	 */
1424 	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1425 	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1426 	    (ddi_in_panic() == 0)) {
1427 		spkt->satapkt_reason = SATA_PKT_BUSY;
1428 		NVLOG((NVDBG_VERBOSE, nvc, nvp,
1429 		    "nv_sata_start: waiting for restore "));
1430 		mutex_exit(&nvp->nvp_mutex);
1431 
1432 		return (SATA_TRAN_BUSY);
1433 	}
1434 
1435 	if (nvp->nvp_state & NV_PORT_ABORTING) {
1436 		spkt->satapkt_reason = SATA_PKT_BUSY;
1437 		NVLOG((NVDBG_ERRS, nvc, nvp,
1438 		    "nv_sata_start: NV_PORT_ABORTING"));
1439 		mutex_exit(&nvp->nvp_mutex);
1440 
1441 		return (SATA_TRAN_BUSY);
1442 	}
1443 
1444 	/* Clear SError to be able to check errors after the command failure */
1445 	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);
1446 
1447 	if (spkt->satapkt_op_mode &
1448 	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1449 
1450 		ret = nv_start_sync(nvp, spkt);
1451 
1452 		mutex_exit(&nvp->nvp_mutex);
1453 
1454 		return (ret);
1455 	}
1456 
1457 	/*
1458 	 * start command asynchronous command
1459 	 */
1460 	ret = nv_start_async(nvp, spkt);
1461 
1462 	mutex_exit(&nvp->nvp_mutex);
1463 
1464 	return (ret);
1465 }
1466 
1467 
1468 /*
1469  * SATA_OPMODE_POLLING implies the driver is in a
1470  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1471  * If only SATA_OPMODE_SYNCH is set, the driver can use
1472  * interrupts and sleep wait on a cv.
1473  *
1474  * If SATA_OPMODE_POLLING is set, the driver can't use
1475  * interrupts and must busy wait and simulate the
1476  * interrupts by waiting for BSY to be cleared.
1477  *
1478  * Synchronous mode has to return BUSY if there are
1479  * any other commands already on the drive.
1480  */
1481 static int
1482 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1483 {
1484 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1485 	int ret;
1486 
1487 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1488 
1489 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1490 		spkt->satapkt_reason = SATA_PKT_BUSY;
1491 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1492 		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1493 		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1494 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1495 		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1496 
1497 		return (SATA_TRAN_BUSY);
1498 	}
1499 
1500 	/*
1501 	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1502 	 */
1503 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1504 	    servicing_interrupt()) {
1505 		spkt->satapkt_reason = SATA_PKT_BUSY;
1506 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1507 		    "SYNC mode not allowed during interrupt"));
1508 
1509 		return (SATA_TRAN_BUSY);
1510 
1511 	}
1512 
1513 	/*
1514 	 * disable interrupt generation if in polled mode
1515 	 */
1516 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1517 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1518 	}
1519 
1520 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1521 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1522 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1523 		}
1524 
1525 		return (ret);
1526 	}
1527 
1528 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1529 		mutex_exit(&nvp->nvp_mutex);
1530 		ret = nv_poll_wait(nvp, spkt);
1531 		mutex_enter(&nvp->nvp_mutex);
1532 
1533 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1534 
1535 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1536 		    " done % reason %d", ret));
1537 
1538 		return (ret);
1539 	}
1540 
1541 	/*
1542 	 * non-polling synchronous mode handling.  The interrupt will signal
1543 	 * when the IO is completed.
1544 	 */
1545 	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1546 
1547 	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1548 
1549 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1550 	}
1551 
1552 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1553 	    " done % reason %d", spkt->satapkt_reason));
1554 
1555 	return (SATA_TRAN_ACCEPTED);
1556 }
1557 
1558 
/*
 * Busy-wait loop for SATA_OPMODE_POLLING packets: waits for BSY to
 * clear, then simulates the interrupt by invoking the controller's
 * interrupt handler directly, repeating until the slot completes or
 * the packet times out.  Always returns SATA_TRAN_ACCEPTED; the
 * outcome is conveyed through spkt->satapkt_reason.
 * Entered with nvp_mutex NOT held (caller drops it first).
 */
static int
nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
{
	int ret;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
#if ! defined(__lock_lint)
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
#endif

	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));

	for (;;) {

		NV_DELAY_NSEC(400);

		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
		/*
		 * Wait for BSY to clear, up to the packet's timeout.  On
		 * timeout, fail the packet and schedule a port reset.
		 */
		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
			mutex_enter(&nvp->nvp_mutex);
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			nvp->nvp_state |= NV_PORT_RESET;
			nvp->nvp_state &= ~(NV_PORT_RESTORE |
			    NV_PORT_RESET_RETRY);
			nv_reset(nvp);
			nv_complete_io(nvp, spkt, 0);
			mutex_exit(&nvp->nvp_mutex);
			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
			    "SATA_STATUS_BSY"));

			return (SATA_TRAN_ACCEPTED);
		}

		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));

		/*
		 * Simulate interrupt.
		 */
		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));

		/*
		 * An unclaimed interrupt means the command made no
		 * progress; treat it like a timeout and reset the port.
		 */
		if (ret != DDI_INTR_CLAIMED) {
			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
			    " unclaimed -- resetting"));
			mutex_enter(&nvp->nvp_mutex);
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			nvp->nvp_state |= NV_PORT_RESET;
			nvp->nvp_state &= ~(NV_PORT_RESTORE |
			    NV_PORT_RESET_RETRY);
			nv_reset(nvp);
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_complete_io(nvp, spkt, 0);
			mutex_exit(&nvp->nvp_mutex);

			return (SATA_TRAN_ACCEPTED);
		}

#if ! defined(__lock_lint)
		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
			/*
			 * packet is complete
			 */
			return (SATA_TRAN_ACCEPTED);
		}
#endif
	}
	/*NOTREACHED*/
}
1627 
1628 
1629 /*
1630  * Called by sata module to abort outstanding packets.
1631  */
1632 /*ARGSUSED*/
1633 static int
1634 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1635 {
1636 	int cport = spkt->satapkt_device.satadev_addr.cport;
1637 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1638 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1639 	int c_a, ret;
1640 
1641 	ASSERT(cport < NV_MAX_PORTS(nvc));
1642 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1643 
1644 	mutex_enter(&nvp->nvp_mutex);
1645 
1646 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1647 		mutex_exit(&nvp->nvp_mutex);
1648 		nv_cmn_err(CE_WARN, nvc, nvp,
1649 		    "abort request failed: port inactive");
1650 
1651 		return (SATA_FAILURE);
1652 	}
1653 
1654 	/*
1655 	 * spkt == NULL then abort all commands
1656 	 */
1657 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1658 
1659 	if (c_a) {
1660 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1661 		    "packets aborted running=%d", c_a));
1662 		ret = SATA_SUCCESS;
1663 	} else {
1664 		if (spkt == NULL) {
1665 			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1666 		} else {
1667 			NVLOG((NVDBG_ENTRY, nvc, nvp,
1668 			    "can't find spkt to abort"));
1669 		}
1670 		ret = SATA_FAILURE;
1671 	}
1672 
1673 	mutex_exit(&nvp->nvp_mutex);
1674 
1675 	return (ret);
1676 }
1677 
1678 
1679 /*
1680  * if spkt == NULL abort all pkts running, otherwise
1681  * abort the requested packet.  must be called with nv_mutex
1682  * held and returns with it held.  Not NCQ aware.
1683  */
1684 static int
1685 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason, int flag)
1686 {
1687 	int aborted = 0, i, reset_once = B_FALSE;
1688 	struct nv_slot *nv_slotp;
1689 	sata_pkt_t *spkt_slot;
1690 
1691 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1692 
1693 	/*
1694 	 * return if the port is not configured
1695 	 */
1696 	if (nvp->nvp_slot == NULL) {
1697 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1698 		    "nv_abort_active: not configured so returning"));
1699 
1700 		return (0);
1701 	}
1702 
1703 	NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1704 
1705 	nvp->nvp_state |= NV_PORT_ABORTING;
1706 
1707 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1708 
1709 		nv_slotp = &(nvp->nvp_slot[i]);
1710 		spkt_slot = nv_slotp->nvslot_spkt;
1711 
1712 		/*
1713 		 * skip if not active command in slot
1714 		 */
1715 		if (spkt_slot == NULL) {
1716 			continue;
1717 		}
1718 
1719 		/*
1720 		 * if a specific packet was requested, skip if
1721 		 * this is not a match
1722 		 */
1723 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1724 			continue;
1725 		}
1726 
1727 		/*
1728 		 * stop the hardware.  This could need reworking
1729 		 * when NCQ is enabled in the driver.
1730 		 */
1731 		if (reset_once == B_FALSE) {
1732 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1733 
1734 			/*
1735 			 * stop DMA engine
1736 			 */
1737 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1738 
1739 			/*
1740 			 * Reset only if explicitly specified by the arg flag
1741 			 */
1742 			if (flag == B_TRUE) {
1743 				reset_once = B_TRUE;
1744 				nvp->nvp_state |= NV_PORT_RESET;
1745 				nvp->nvp_state &= ~(NV_PORT_RESTORE |
1746 				    NV_PORT_RESET_RETRY);
1747 				nv_reset(nvp);
1748 			}
1749 		}
1750 
1751 		spkt_slot->satapkt_reason = abort_reason;
1752 		nv_complete_io(nvp, spkt_slot, i);
1753 		aborted++;
1754 	}
1755 
1756 	nvp->nvp_state &= ~NV_PORT_ABORTING;
1757 
1758 	return (aborted);
1759 }
1760 
1761 
1762 /*
1763  * Called by sata module to reset a port, device, or the controller.
1764  */
1765 static int
1766 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1767 {
1768 	int cport = sd->satadev_addr.cport;
1769 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1770 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1771 	int ret = SATA_SUCCESS;
1772 
1773 	ASSERT(cport < NV_MAX_PORTS(nvc));
1774 
1775 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1776 
1777 	mutex_enter(&nvp->nvp_mutex);
1778 
1779 	switch (sd->satadev_addr.qual) {
1780 
1781 	case SATA_ADDR_CPORT:
1782 		/*FALLTHROUGH*/
1783 	case SATA_ADDR_DCPORT:
1784 		nvp->nvp_state |= NV_PORT_RESET;
1785 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1786 		nv_reset(nvp);
1787 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);
1788 
1789 		break;
1790 	case SATA_ADDR_CNTRL:
1791 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1792 		    "nv_sata_reset: constroller reset not supported"));
1793 
1794 		break;
1795 	case SATA_ADDR_PMPORT:
1796 	case SATA_ADDR_DPMPORT:
1797 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1798 		    "nv_sata_reset: port multipliers not supported"));
1799 		/*FALLTHROUGH*/
1800 	default:
1801 		/*
1802 		 * unsupported case
1803 		 */
1804 		ret = SATA_FAILURE;
1805 		break;
1806 	}
1807 
1808 	if (ret == SATA_SUCCESS) {
1809 		/*
1810 		 * If the port is inactive, do a quiet reset and don't attempt
1811 		 * to wait for reset completion or do any post reset processing
1812 		 */
1813 		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1814 			nvp->nvp_state &= ~NV_PORT_RESET;
1815 			nvp->nvp_reset_time = 0;
1816 		}
1817 
1818 		/*
1819 		 * clear the port failed flag
1820 		 */
1821 		nvp->nvp_state &= ~NV_PORT_FAILED;
1822 	}
1823 
1824 	mutex_exit(&nvp->nvp_mutex);
1825 
1826 	return (ret);
1827 }
1828 
1829 
1830 /*
1831  * Sata entry point to handle port activation.  cfgadm -c connect
1832  */
1833 static int
1834 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1835 {
1836 	int cport = sd->satadev_addr.cport;
1837 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1838 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1839 
1840 	ASSERT(cport < NV_MAX_PORTS(nvc));
1841 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1842 
1843 	mutex_enter(&nvp->nvp_mutex);
1844 
1845 	sd->satadev_state = SATA_STATE_READY;
1846 
1847 	nv_copy_registers(nvp, sd, NULL);
1848 
1849 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1850 
1851 	nvp->nvp_state &= ~NV_PORT_INACTIVE;
1852 	/* Initiate link probing and device signature acquisition */
1853 	nvp->nvp_type = SATA_DTYPE_NONE;
1854 	nvp->nvp_signature = 0;
1855 	nvp->nvp_state |= NV_PORT_RESET; /* | NV_PORT_PROBE; */
1856 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
1857 	nv_reset(nvp);
1858 
1859 	mutex_exit(&nvp->nvp_mutex);
1860 
1861 	return (SATA_SUCCESS);
1862 }
1863 
1864 
1865 /*
1866  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1867  */
1868 static int
1869 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1870 {
1871 	int cport = sd->satadev_addr.cport;
1872 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1873 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1874 
1875 	ASSERT(cport < NV_MAX_PORTS(nvc));
1876 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1877 
1878 	mutex_enter(&nvp->nvp_mutex);
1879 
1880 	(void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);
1881 
1882 	/*
1883 	 * make the device inaccessible
1884 	 */
1885 	nvp->nvp_state |= NV_PORT_INACTIVE;
1886 
1887 	/*
1888 	 * disable the interrupts on port
1889 	 */
1890 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1891 
1892 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1893 	nv_copy_registers(nvp, sd, NULL);
1894 
1895 	mutex_exit(&nvp->nvp_mutex);
1896 
1897 	return (SATA_SUCCESS);
1898 }
1899 
1900 
1901 /*
1902  * find an empty slot in the driver's queue, increment counters,
1903  * and then invoke the appropriate PIO or DMA start routine.
1904  */
1905 static int
1906 nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1907 {
1908 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1909 	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1910 	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1911 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1912 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1913 	nv_slot_t *nv_slotp;
1914 	boolean_t dma_cmd;
1915 
1916 	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1917 	    sata_cmdp->satacmd_cmd_reg));
1918 
1919 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1920 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1921 		nvp->nvp_ncq_run++;
1922 		/*
1923 		 * search for an empty NCQ slot.  by the time, it's already
1924 		 * been determined by the caller that there is room on the
1925 		 * queue.
1926 		 */
1927 		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1928 		    on_bit <<= 1) {
1929 			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1930 				break;
1931 			}
1932 		}
1933 
1934 		/*
1935 		 * the first empty slot found, should not exceed the queue
1936 		 * depth of the drive.  if it does it's an error.
1937 		 */
1938 		ASSERT(slot != nvp->nvp_queue_depth);
1939 
1940 		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1941 		    nvp->nvp_sactive);
1942 		ASSERT((sactive & on_bit) == 0);
1943 		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1944 		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1945 		    on_bit));
1946 		nvp->nvp_sactive_cache |= on_bit;
1947 
1948 		ncq = NVSLOT_NCQ;
1949 
1950 	} else {
1951 		nvp->nvp_non_ncq_run++;
1952 		slot = 0;
1953 	}
1954 
1955 	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1956 
1957 	ASSERT(nv_slotp->nvslot_spkt == NULL);
1958 
1959 	nv_slotp->nvslot_spkt = spkt;
1960 	nv_slotp->nvslot_flags = ncq;
1961 
1962 	/*
1963 	 * the sata module doesn't indicate which commands utilize the
1964 	 * DMA engine, so find out using this switch table.
1965 	 */
1966 	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1967 	case SATAC_READ_DMA_EXT:
1968 	case SATAC_WRITE_DMA_EXT:
1969 	case SATAC_WRITE_DMA:
1970 	case SATAC_READ_DMA:
1971 	case SATAC_READ_DMA_QUEUED:
1972 	case SATAC_READ_DMA_QUEUED_EXT:
1973 	case SATAC_WRITE_DMA_QUEUED:
1974 	case SATAC_WRITE_DMA_QUEUED_EXT:
1975 	case SATAC_READ_FPDMA_QUEUED:
1976 	case SATAC_WRITE_FPDMA_QUEUED:
1977 		dma_cmd = B_TRUE;
1978 		break;
1979 	default:
1980 		dma_cmd = B_FALSE;
1981 	}
1982 
1983 	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1984 		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1985 		nv_slotp->nvslot_start = nv_start_dma;
1986 		nv_slotp->nvslot_intr = nv_intr_dma;
1987 	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1988 		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
1989 		nv_slotp->nvslot_start = nv_start_pkt_pio;
1990 		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1991 		if ((direction == SATA_DIR_READ) ||
1992 		    (direction == SATA_DIR_WRITE)) {
1993 			nv_slotp->nvslot_byte_count =
1994 			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1995 			nv_slotp->nvslot_v_addr =
1996 			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1997 			/*
1998 			 * Freeing DMA resources allocated by the framework
1999 			 * now to avoid buffer overwrite (dma sync) problems
2000 			 * when the buffer is released at command completion.
2001 			 * Primarily an issue on systems with more than
2002 			 * 4GB of memory.
2003 			 */
2004 			sata_free_dma_resources(spkt);
2005 		}
2006 	} else if (direction == SATA_DIR_NODATA_XFER) {
2007 		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
2008 		nv_slotp->nvslot_start = nv_start_nodata;
2009 		nv_slotp->nvslot_intr = nv_intr_nodata;
2010 	} else if (direction == SATA_DIR_READ) {
2011 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
2012 		nv_slotp->nvslot_start = nv_start_pio_in;
2013 		nv_slotp->nvslot_intr = nv_intr_pio_in;
2014 		nv_slotp->nvslot_byte_count =
2015 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2016 		nv_slotp->nvslot_v_addr =
2017 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2018 		/*
2019 		 * Freeing DMA resources allocated by the framework now to
2020 		 * avoid buffer overwrite (dma sync) problems when the buffer
2021 		 * is released at command completion.  This is not an issue
2022 		 * for write because write does not update the buffer.
2023 		 * Primarily an issue on systems with more than 4GB of memory.
2024 		 */
2025 		sata_free_dma_resources(spkt);
2026 	} else if (direction == SATA_DIR_WRITE) {
2027 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
2028 		nv_slotp->nvslot_start = nv_start_pio_out;
2029 		nv_slotp->nvslot_intr = nv_intr_pio_out;
2030 		nv_slotp->nvslot_byte_count =
2031 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2032 		nv_slotp->nvslot_v_addr =
2033 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2034 	} else {
2035 		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2036 		    " %d cookies %d cmd %x",
2037 		    sata_cmdp->satacmd_flags.sata_data_direction,
2038 		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2039 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2040 		ret = SATA_TRAN_CMD_UNSUPPORTED;
2041 
2042 		goto fail;
2043 	}
2044 
2045 	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2046 	    SATA_TRAN_ACCEPTED) {
2047 #ifdef SGPIO_SUPPORT
2048 		nv_sgp_drive_active(nvp->nvp_ctlp,
2049 		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2050 #endif
2051 		nv_slotp->nvslot_stime = ddi_get_lbolt();
2052 
2053 		/*
2054 		 * start timer if it's not already running and this packet
2055 		 * is not requesting polled mode.
2056 		 */
2057 		if ((nvp->nvp_timeout_id == 0) &&
2058 		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2059 			nv_setup_timeout(nvp, NV_ONE_SEC);
2060 		}
2061 
2062 		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
2063 		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2064 
2065 		return (SATA_TRAN_ACCEPTED);
2066 	}
2067 
2068 	fail:
2069 
2070 	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2071 
2072 	if (ncq == NVSLOT_NCQ) {
2073 		nvp->nvp_ncq_run--;
2074 		nvp->nvp_sactive_cache &= ~on_bit;
2075 	} else {
2076 		nvp->nvp_non_ncq_run--;
2077 	}
2078 	nv_slotp->nvslot_spkt = NULL;
2079 	nv_slotp->nvslot_flags = 0;
2080 
2081 	return (ret);
2082 }
2083 
2084 
2085 /*
2086  * Check if the signature is ready and if non-zero translate
2087  * it into a solaris sata defined type.
2088  */
2089 static void
2090 nv_read_signature(nv_port_t *nvp)
2091 {
2092 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2093 
2094 	/*
2095 	 * Task file error register bit 0 set to 1 indicate that drive
2096 	 * is ready and have sent D2H FIS with a signature.
2097 	 */
2098 	if (nv_check_tfr_error != 0) {
2099 		uint8_t tfr_error = nv_get8(cmdhdl, nvp->nvp_error);
2100 		if (!(tfr_error & SATA_ERROR_ILI)) {
2101 			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
2102 			    "nv_read_signature: signature not ready"));
2103 			return;
2104 		}
2105 	}
2106 
2107 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2108 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2109 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2110 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2111 
2112 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
2113 	    "nv_read_signature: 0x%x ", nvp->nvp_signature));
2114 
2115 	switch (nvp->nvp_signature) {
2116 
2117 	case NV_SIG_DISK:
2118 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
2119 		nvp->nvp_type = SATA_DTYPE_ATADISK;
2120 		break;
2121 	case NV_SIG_ATAPI:
2122 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2123 		    "drive is an optical device"));
2124 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2125 		break;
2126 	case NV_SIG_PM:
2127 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2128 		    "device is a port multiplier"));
2129 		nvp->nvp_type = SATA_DTYPE_PMULT;
2130 		break;
2131 	case NV_SIG_NOTREADY:
2132 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2133 		    "signature not ready"));
2134 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2135 		break;
2136 	default:
2137 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2138 		    " recognized", nvp->nvp_signature);
2139 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2140 		break;
2141 	}
2142 
2143 	if (nvp->nvp_signature) {
2144 		nvp->nvp_state &= ~(NV_PORT_RESET_RETRY | NV_PORT_RESET);
2145 	}
2146 
2147 #ifdef SGPIO_SUPPORT
2148 	if (nvp->nvp_signature == NV_SIG_DISK) {
2149 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2150 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2151 	} else {
2152 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2153 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2154 	}
2155 #endif
2156 }
2157 
2158 
2159 /*
2160  * Set up a new timeout or complete a timeout.
2161  * Timeout value has to be specified in microseconds. If time is zero, no new
2162  * timeout is scheduled.
2163  * Must be called at the end of the timeout routine.
2164  */
2165 static void
2166 nv_setup_timeout(nv_port_t *nvp, int time)
2167 {
2168 	clock_t old_duration = nvp->nvp_timeout_duration;
2169 
2170 	ASSERT(time != 0);
2171 
2172 	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
2173 		/*
2174 		 * Since we are dropping the mutex for untimeout,
2175 		 * the timeout may be executed while we are trying to
2176 		 * untimeout and setting up a new timeout.
2177 		 * If nvp_timeout_duration is 0, then this function
2178 		 * was re-entered. Just exit.
2179 		 */
2180 	cmn_err(CE_WARN, "nv_setup_timeout re-entered");
2181 		return;
2182 	}
2183 	nvp->nvp_timeout_duration = 0;
2184 	if (nvp->nvp_timeout_id == 0) {
2185 		/* Start new timer */
2186 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2187 		    drv_usectohz(time));
2188 	} else {
2189 		/*
2190 		 * If the currently running timeout is due later than the
2191 		 * requested one, restart it with a new expiration.
2192 		 * Our timeouts do not need to be accurate - we would be just
2193 		 * checking that the specified time was exceeded.
2194 		 */
2195 		if (old_duration > time) {
2196 			mutex_exit(&nvp->nvp_mutex);
2197 			(void) untimeout(nvp->nvp_timeout_id);
2198 			mutex_enter(&nvp->nvp_mutex);
2199 			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2200 			    drv_usectohz(time));
2201 		}
2202 	}
2203 	nvp->nvp_timeout_duration = time;
2204 }
2205 
2206 
2207 
/*
 * Time in microseconds that nv_reset() keeps COMRESET asserted via
 * drv_usecwait() ("Wait at least 1ms, as required by the spec").
 * Kept as a non-static global so it can be tuned without rebuilding.
 */
int nv_reset_length = NV_RESET_LENGTH;
2209 
2210 /*
2211  * Reset the port
2212  *
2213  * Entered with nvp mutex held
2214  */
2215 static void
2216 nv_reset(nv_port_t *nvp)
2217 {
2218 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2219 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2220 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2221 	uint32_t sctrl, serr, sstatus;
2222 	uint8_t bmicx;
2223 	int i, j, reset = 0;
2224 
2225 	ASSERT(mutex_owned(&nvp->nvp_mutex));
2226 
2227 	NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset()"));
2228 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2229 	NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset: serr 0x%x", serr));
2230 
2231 	/*
2232 	 * stop DMA engine.
2233 	 */
2234 	bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
2235 	nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2236 
2237 	nvp->nvp_state |= NV_PORT_RESET;
2238 	nvp->nvp_reset_time = ddi_get_lbolt();
2239 
2240 	/*
2241 	 * Issue hardware reset; retry if necessary.
2242 	 */
2243 	for (i = 0; i < NV_RESET_ATTEMPTS; i++) {
2244 		/*
2245 		 * Clear signature registers
2246 		 */
2247 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
2248 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2249 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2250 		nv_put8(cmdhdl, nvp->nvp_count, 0);
2251 
2252 		/* Clear task file error register */
2253 		nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);
2254 
2255 		/*
2256 		 * assert reset in PHY by writing a 1 to bit 0 scontrol
2257 		 */
2258 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2259 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2260 		    sctrl | SCONTROL_DET_COMRESET);
2261 
2262 		/* Wait at least 1ms, as required by the spec */
2263 		drv_usecwait(nv_reset_length);
2264 
2265 		/* Reset all accumulated error bits */
2266 		nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);
2267 
2268 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2269 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2270 		NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
2271 		    "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus));
2272 
2273 		/* de-assert reset in PHY */
2274 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2275 		    sctrl & ~SCONTROL_DET_COMRESET);
2276 
2277 		/*
2278 		 * Wait up to 10ms for COMINIT to arrive, indicating that
2279 		 * the device recognized COMRESET.
2280 		 */
2281 		for (j = 0; j < 10; j++) {
2282 			drv_usecwait(NV_ONE_MSEC);
2283 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2284 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2285 			    (SSTATUS_GET_DET(sstatus) ==
2286 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2287 				reset = 1;
2288 				break;
2289 			}
2290 		}
2291 		if (reset == 1)
2292 			break;
2293 	}
2294 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2295 	if (reset == 0) {
2296 		NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset not succeeded "
2297 		    "(serr 0x%x) after %d attempts", serr, i));
2298 	} else {
2299 		NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset succeeded (serr 0x%x)"
2300 		    "after %dms", serr, TICK_TO_MSEC(ddi_get_lbolt() -
2301 		    nvp->nvp_reset_time)));
2302 	}
2303 	nvp->nvp_reset_time = ddi_get_lbolt();
2304 
2305 	if (servicing_interrupt()) {
2306 		nv_setup_timeout(nvp, NV_ONE_MSEC);
2307 	} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
2308 		nv_monitor_reset(nvp);
2309 	}
2310 }
2311 
2312 
2313 /*
2314  * Initialize register handling specific to mcp51/mcp55
2315  */
2316 /* ARGSUSED */
2317 static void
2318 mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2319 {
2320 	nv_port_t *nvp;
2321 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2322 	uint8_t off, port;
2323 
2324 	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2325 	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2326 
2327 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2328 		nvp = &(nvc->nvc_port[port]);
2329 		nvp->nvp_mcp5x_int_status =
2330 		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2331 		nvp->nvp_mcp5x_int_ctl =
2332 		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2333 
2334 		/*
2335 		 * clear any previous interrupts asserted
2336 		 */
2337 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2338 		    MCP5X_INT_CLEAR);
2339 
2340 		/*
2341 		 * These are the interrupts to accept for now.  The spec
2342 		 * says these are enable bits, but nvidia has indicated
2343 		 * these are masking bits.  Even though they may be masked
2344 		 * out to prevent asserting the main interrupt, they can
2345 		 * still be asserted while reading the interrupt status
2346 		 * register, so that needs to be considered in the interrupt
2347 		 * handler.
2348 		 */
2349 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2350 		    ~(MCP5X_INT_IGNORE));
2351 	}
2352 
2353 	/*
2354 	 * Allow the driver to program the BM on the first command instead
2355 	 * of waiting for an interrupt.
2356 	 */
2357 #ifdef NCQ
2358 	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2359 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
2360 	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2361 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
2362 #endif
2363 
2364 	/*
2365 	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2366 	 * Enable DMA to take advantage of that.
2367 	 *
2368 	 */
2369 	if (nvc->nvc_revid >= 0xa3) {
2370 		if (nv_sata_40bit_dma == B_TRUE) {
2371 			uint32_t reg32;
2372 			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2373 			    "rev id is %X.  40-bit DMA addressing"
2374 			    " enabled", nvc->nvc_revid));
2375 			nvc->dma_40bit = B_TRUE;
2376 
2377 			reg32 = pci_config_get32(pci_conf_handle,
2378 			    NV_SATA_CFG_20);
2379 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2380 			    reg32 | NV_40BIT_PRD);
2381 
2382 			/*
2383 			 * CFG_23 bits 0-7 contain the top 8 bits (of 40
2384 			 * bits) for the primary PRD table, and bits 8-15
2385 			 * contain the top 8 bits for the secondary.  Set
2386 			 * to zero because the DMA attribute table for PRD
2387 			 * allocation forces it into 32 bit address space
2388 			 * anyway.
2389 			 */
2390 			reg32 = pci_config_get32(pci_conf_handle,
2391 			    NV_SATA_CFG_23);
2392 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_23,
2393 			    reg32 & 0xffff0000);
2394 		} else {
2395 			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2396 			    "40-bit DMA disabled by nv_sata_40bit_dma"));
2397 		}
2398 	} else {
2399 		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "rev id is %X and is "
2400 		    "not capable of 40-bit DMA addressing", nvc->nvc_revid);
2401 	}
2402 }
2403 
2404 
2405 /*
2406  * Initialize register handling specific to ck804
2407  */
2408 static void
2409 ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2410 {
2411 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2412 	uint32_t reg32;
2413 	uint16_t reg16;
2414 	nv_port_t *nvp;
2415 	int j;
2416 
2417 	/*
2418 	 * delay hotplug interrupts until PHYRDY.
2419 	 */
2420 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2421 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2422 	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2423 
2424 	/*
2425 	 * enable hot plug interrupts for channel x and y
2426 	 */
2427 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2428 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2429 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2430 	    NV_HIRQ_EN | reg16);
2431 
2432 
2433 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2434 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2435 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2436 	    NV_HIRQ_EN | reg16);
2437 
2438 	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2439 
2440 	/*
2441 	 * clear any existing interrupt pending then enable
2442 	 */
2443 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2444 		nvp = &(nvc->nvc_port[j]);
2445 		mutex_enter(&nvp->nvp_mutex);
2446 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2447 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2448 		mutex_exit(&nvp->nvp_mutex);
2449 	}
2450 }
2451 
2452 
2453 /*
2454  * Initialize the controller and set up driver data structures.
2455  * determine if ck804 or mcp5x class.
2456  */
2457 static int
2458 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2459 {
2460 	struct sata_hba_tran stran;
2461 	nv_port_t *nvp;
2462 	int j, ck804;
2463 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2464 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2465 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2466 	uint32_t reg32;
2467 	uint8_t reg8, reg8_save;
2468 
2469 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2470 
2471 	ck804 = B_TRUE;
2472 #ifdef SGPIO_SUPPORT
2473 	nvc->nvc_mcp5x_flag = B_FALSE;
2474 #endif
2475 
2476 	/*
2477 	 * Need to set bit 2 to 1 at config offset 0x50
2478 	 * to enable access to the bar5 registers.
2479 	 */
2480 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2481 	if (!(reg32 & NV_BAR5_SPACE_EN)) {
2482 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2483 		    reg32 | NV_BAR5_SPACE_EN);
2484 	}
2485 
2486 	/*
2487 	 * Determine if this is ck804 or mcp5x.  ck804 will map in the
2488 	 * task file registers into bar5 while mcp5x won't.  The offset of
2489 	 * the task file registers in mcp5x's space is unused, so it will
2490 	 * return zero.  So check one of the task file registers to see if it is
2491 	 * writable and reads back what was written.  If it's mcp5x it will
2492 	 * return back 0xff whereas ck804 will return the value written.
2493 	 */
2494 	reg8_save = nv_get8(bar5_hdl,
2495 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2496 
2497 
2498 	for (j = 1; j < 3; j++) {
2499 
2500 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2501 		reg8 = nv_get8(bar5_hdl,
2502 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2503 
2504 		if (reg8 != j) {
2505 			ck804 = B_FALSE;
2506 			nvc->nvc_mcp5x_flag = B_TRUE;
2507 			break;
2508 		}
2509 	}
2510 
2511 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2512 
2513 	if (ck804 == B_TRUE) {
2514 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2515 		nvc->nvc_interrupt = ck804_intr;
2516 		nvc->nvc_reg_init = ck804_reg_init;
2517 		nvc->nvc_set_intr = ck804_set_intr;
2518 	} else {
2519 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55"));
2520 		nvc->nvc_interrupt = mcp5x_intr;
2521 		nvc->nvc_reg_init = mcp5x_reg_init;
2522 		nvc->nvc_set_intr = mcp5x_set_intr;
2523 	}
2524 
2525 
2526 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2527 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2528 	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2529 	stran.sata_tran_hba_features_support =
2530 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2531 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2532 	stran.sata_tran_probe_port = nv_sata_probe;
2533 	stran.sata_tran_start = nv_sata_start;
2534 	stran.sata_tran_abort = nv_sata_abort;
2535 	stran.sata_tran_reset_dport = nv_sata_reset;
2536 	stran.sata_tran_selftest = NULL;
2537 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2538 	stran.sata_tran_pwrmgt_ops = NULL;
2539 	stran.sata_tran_ioctl = NULL;
2540 	nvc->nvc_sata_hba_tran = stran;
2541 
2542 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2543 	    KM_SLEEP);
2544 
2545 	/*
2546 	 * initialize registers common to all chipsets
2547 	 */
2548 	nv_common_reg_init(nvc);
2549 
2550 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2551 		nvp = &(nvc->nvc_port[j]);
2552 
2553 		cmd_addr = nvp->nvp_cmd_addr;
2554 		ctl_addr = nvp->nvp_ctl_addr;
2555 		bm_addr = nvp->nvp_bm_addr;
2556 
2557 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2558 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2559 
2560 		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2561 
2562 		nvp->nvp_data	= cmd_addr + NV_DATA;
2563 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2564 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2565 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2566 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2567 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2568 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2569 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2570 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2571 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2572 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2573 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2574 
2575 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2576 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2577 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2578 
2579 		nvp->nvp_state = 0;
2580 
2581 		/*
2582 		 * Initialize dma handles, etc.
2583 		 * If it fails, the port is in inactive state.
2584 		 */
2585 		(void) nv_init_port(nvp);
2586 	}
2587 
2588 	/*
2589 	 * initialize register by calling chip specific reg initialization
2590 	 */
2591 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2592 
2593 	/* initialize the hba dma attribute */
2594 	if (nvc->dma_40bit == B_TRUE)
2595 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2596 		    &buffer_dma_40bit_attr;
2597 	else
2598 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2599 		    &buffer_dma_attr;
2600 
2601 	return (NV_SUCCESS);
2602 }
2603 
2604 
2605 /*
2606  * Initialize data structures with enough slots to handle queuing, if
2607  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2608  * NCQ support is built into the driver and enabled.  It might have been
2609  * better to derive the true size from the drive itself, but the sata
2610  * module only sends down that information on the first NCQ command,
2611  * which means possibly re-sizing the structures on an interrupt stack,
2612  * making error handling more messy.  The easy way is to just allocate
2613  * all 32 slots, which is what most drives support anyway.
2614  */
2615 static int
2616 nv_init_port(nv_port_t *nvp)
2617 {
2618 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2619 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2620 	dev_info_t *dip = nvc->nvc_dip;
2621 	ddi_device_acc_attr_t dev_attr;
2622 	size_t buf_size;
2623 	ddi_dma_cookie_t cookie;
2624 	uint_t count;
2625 	int rc, i;
2626 
2627 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2628 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2629 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2630 
2631 	if (nvp->nvp_state & NV_PORT_INIT) {
2632 		NVLOG((NVDBG_INIT, nvc, nvp,
2633 		    "nv_init_port previously initialized"));
2634 
2635 		return (NV_SUCCESS);
2636 	} else {
2637 		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2638 	}
2639 
2640 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2641 	    NV_QUEUE_SLOTS, KM_SLEEP);
2642 
2643 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2644 	    NV_QUEUE_SLOTS, KM_SLEEP);
2645 
2646 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2647 	    NV_QUEUE_SLOTS, KM_SLEEP);
2648 
2649 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2650 	    NV_QUEUE_SLOTS, KM_SLEEP);
2651 
2652 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2653 	    KM_SLEEP);
2654 
2655 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2656 
2657 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2658 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2659 
2660 		if (rc != DDI_SUCCESS) {
2661 			nv_uninit_port(nvp);
2662 
2663 			return (NV_FAILURE);
2664 		}
2665 
2666 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2667 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2668 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2669 		    &(nvp->nvp_sg_acc_hdl[i]));
2670 
2671 		if (rc != DDI_SUCCESS) {
2672 			nv_uninit_port(nvp);
2673 
2674 			return (NV_FAILURE);
2675 		}
2676 
2677 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2678 		    nvp->nvp_sg_addr[i], buf_size,
2679 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2680 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2681 
2682 		if (rc != DDI_DMA_MAPPED) {
2683 			nv_uninit_port(nvp);
2684 
2685 			return (NV_FAILURE);
2686 		}
2687 
2688 		ASSERT(count == 1);
2689 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2690 
2691 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2692 
2693 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2694 	}
2695 
2696 	/*
2697 	 * nvp_queue_depth represents the actual drive queue depth, not the
2698 	 * number of slots allocated in the structures (which may be more).
2699 	 * Actual queue depth is only learned after the first NCQ command, so
2700 	 * initialize it to 1 for now.
2701 	 */
2702 	nvp->nvp_queue_depth = 1;
2703 
2704 	/*
2705 	 * Port is initialized whether the device is attached or not.
2706 	 * Link processing and device identification will be started later,
2707 	 * after interrupts are initialized.
2708 	 */
2709 	nvp->nvp_type = SATA_DTYPE_NONE;
2710 	nvp->nvp_signature = 0;
2711 
2712 	nvp->nvp_state |= NV_PORT_INIT;
2713 
2714 	return (NV_SUCCESS);
2715 }
2716 
2717 
2718 /*
2719  * Establish initial link & device type
2720  * Called only from nv_attach
2721  * Loops up to approximately 210ms; can exit earlier.
2722  * The time includes wait for the link up and completion of the initial
2723  * signature gathering operation.
2724  */
2725 static void
2726 nv_init_port_link_processing(nv_ctl_t *nvc)
2727 {
2728 	ddi_acc_handle_t bar5_hdl;
2729 	nv_port_t *nvp;
2730 	volatile uint32_t sstatus;
2731 	int port, links_up, ready_ports, i;
2732 
2733 
2734 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2735 		nvp = &(nvc->nvc_port[port]);
2736 		if (nvp != NULL && (nvp->nvp_state & NV_PORT_INIT)) {
2737 			/*
2738 			 * Initiate device identification, if any is attached
2739 			 * and reset was not already applied by hot-plug
2740 			 * event processing.
2741 			 */
2742 			mutex_enter(&nvp->nvp_mutex);
2743 			if (!(nvp->nvp_state & NV_PORT_RESET)) {
2744 				nvp->nvp_state |= NV_PORT_RESET | NV_PORT_PROBE;
2745 				nv_reset(nvp);
2746 			}
2747 			mutex_exit(&nvp->nvp_mutex);
2748 		}
2749 	}
2750 	/*
2751 	 * Wait up to 10ms for links up.
2752 	 * Spec says that link should be up in 1ms.
2753 	 */
2754 	for (i = 0; i < 10; i++) {
2755 		drv_usecwait(NV_ONE_MSEC);
2756 		links_up = 0;
2757 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2758 			nvp = &(nvc->nvc_port[port]);
2759 			mutex_enter(&nvp->nvp_mutex);
2760 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2761 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2762 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2763 			    (SSTATUS_GET_DET(sstatus) ==
2764 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2765 				if ((nvp->nvp_state & NV_PORT_RESET) &&
2766 				    nvp->nvp_type == SATA_DTYPE_NONE) {
2767 					nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2768 				}
2769 				NVLOG((NVDBG_INIT, nvc, nvp,
2770 				    "nv_init_port_link_processing()"
2771 				    "link up; time from reset %dms",
2772 				    TICK_TO_MSEC(ddi_get_lbolt() -
2773 				    nvp->nvp_reset_time)));
2774 				links_up++;
2775 			}
2776 			mutex_exit(&nvp->nvp_mutex);
2777 		}
2778 		if (links_up == NV_MAX_PORTS(nvc)) {
2779 			break;
2780 		}
2781 	}
2782 	NVLOG((NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing():"
2783 	    "%d links up", links_up));
2784 	/*
2785 	 * At this point, if any device is attached, the link is established.
2786 	 * Wait till devices are ready to be accessed, no more than 200ms.
2787 	 * 200ms is empirical time in which a signature should be available.
2788 	 */
2789 	for (i = 0; i < 200; i++) {
2790 		ready_ports = 0;
2791 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2792 			nvp = &(nvc->nvc_port[port]);
2793 			mutex_enter(&nvp->nvp_mutex);
2794 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2795 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2796 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2797 			    (SSTATUS_GET_DET(sstatus) ==
2798 			    SSTATUS_DET_DEVPRE_PHYCOM) &&
2799 			    !(nvp->nvp_state & (NV_PORT_RESET |
2800 			    NV_PORT_RESET_RETRY))) {
2801 				/*
2802 				 * Reset already processed
2803 				 */
2804 				NVLOG((NVDBG_RESET, nvc, nvp,
2805 				    "nv_init_port_link_processing()"
2806 				    "device ready; port state %x; "
2807 				    "time from reset %dms", nvp->nvp_state,
2808 				    TICK_TO_MSEC(ddi_get_lbolt() -
2809 				    nvp->nvp_reset_time)));
2810 
2811 				ready_ports++;
2812 			}
2813 			mutex_exit(&nvp->nvp_mutex);
2814 		}
2815 		if (ready_ports == links_up) {
2816 			break;
2817 		}
2818 		drv_usecwait(NV_ONE_MSEC);
2819 	}
2820 	NVLOG((NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing():"
2821 	    "%d devices ready", ready_ports));
2822 }
2823 
2824 /*
2825  * Free dynamically allocated structures for port.
2826  */
2827 static void
2828 nv_uninit_port(nv_port_t *nvp)
2829 {
2830 	int i;
2831 
2832 	/*
2833 	 * It is possible to reach here before a port has been initialized or
2834 	 * after it has already been uninitialized.  Just return in that case.
2835 	 */
2836 	if (nvp->nvp_slot == NULL) {
2837 
2838 		return;
2839 	}
2840 	/*
2841 	 * Mark port unusable now.
2842 	 */
2843 	nvp->nvp_state &= ~NV_PORT_INIT;
2844 
2845 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2846 	    "nv_uninit_port uninitializing"));
2847 
2848 #ifdef SGPIO_SUPPORT
2849 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
2850 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2851 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2852 	}
2853 #endif
2854 
2855 	nvp->nvp_type = SATA_DTYPE_NONE;
2856 
2857 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2858 		if (nvp->nvp_sg_paddr[i]) {
2859 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2860 		}
2861 
2862 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2863 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2864 		}
2865 
2866 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2867 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2868 		}
2869 	}
2870 
2871 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2872 	nvp->nvp_slot = NULL;
2873 
2874 	kmem_free(nvp->nvp_sg_dma_hdl,
2875 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2876 	nvp->nvp_sg_dma_hdl = NULL;
2877 
2878 	kmem_free(nvp->nvp_sg_acc_hdl,
2879 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2880 	nvp->nvp_sg_acc_hdl = NULL;
2881 
2882 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2883 	nvp->nvp_sg_addr = NULL;
2884 
2885 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2886 	nvp->nvp_sg_paddr = NULL;
2887 }
2888 
2889 
2890 /*
2891  * Cache register offsets and access handles to frequently accessed registers
2892  * which are common to either chipset.
2893  */
2894 static void
2895 nv_common_reg_init(nv_ctl_t *nvc)
2896 {
2897 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2898 	uchar_t *bm_addr_offset, *sreg_offset;
2899 	uint8_t bar, port;
2900 	nv_port_t *nvp;
2901 
2902 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2903 		if (port == 0) {
2904 			bar = NV_BAR_0;
2905 			bm_addr_offset = 0;
2906 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2907 		} else {
2908 			bar = NV_BAR_2;
2909 			bm_addr_offset = (uchar_t *)8;
2910 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2911 		}
2912 
2913 		nvp = &(nvc->nvc_port[port]);
2914 		nvp->nvp_ctlp = nvc;
2915 		nvp->nvp_port_num = port;
2916 		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2917 
2918 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2919 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2920 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2921 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2922 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2923 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2924 		    (long)bm_addr_offset;
2925 
2926 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2927 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2928 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2929 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2930 	}
2931 }
2932 
2933 
2934 static void
2935 nv_uninit_ctl(nv_ctl_t *nvc)
2936 {
2937 	int port;
2938 	nv_port_t *nvp;
2939 
2940 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2941 
2942 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2943 		nvp = &(nvc->nvc_port[port]);
2944 		mutex_enter(&nvp->nvp_mutex);
2945 		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2946 		nv_uninit_port(nvp);
2947 		mutex_exit(&nvp->nvp_mutex);
2948 		mutex_destroy(&nvp->nvp_mutex);
2949 		cv_destroy(&nvp->nvp_poll_cv);
2950 	}
2951 
2952 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2953 	nvc->nvc_port = NULL;
2954 }
2955 
2956 
2957 /*
2958  * ck804 interrupt.  This is a wrapper around ck804_intr_process so
2959  * that interrupts from other devices can be disregarded while dtracing.
2960  */
2961 /* ARGSUSED */
2962 static uint_t
2963 ck804_intr(caddr_t arg1, caddr_t arg2)
2964 {
2965 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2966 	uint8_t intr_status;
2967 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2968 
2969 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
2970 		return (DDI_INTR_UNCLAIMED);
2971 
2972 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2973 
2974 	if (intr_status == 0) {
2975 
2976 		return (DDI_INTR_UNCLAIMED);
2977 	}
2978 
2979 	ck804_intr_process(nvc, intr_status);
2980 
2981 	return (DDI_INTR_CLAIMED);
2982 }
2983 
2984 
2985 /*
2986  * Main interrupt handler for ck804.  handles normal device
2987  * interrupts as well as port hot plug and remove interrupts.
2988  *
2989  */
2990 static void
2991 ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2992 {
2993 
2994 	int port, i;
2995 	nv_port_t *nvp;
2996 	nv_slot_t *nv_slotp;
2997 	uchar_t	status;
2998 	sata_pkt_t *spkt;
2999 	uint8_t bmstatus, clear_bits;
3000 	ddi_acc_handle_t bmhdl;
3001 	int nvcleared = 0;
3002 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3003 	uint32_t sstatus;
3004 	int port_mask_hot[] = {
3005 		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
3006 	};
3007 	int port_mask_pm[] = {
3008 		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
3009 	};
3010 
3011 	NVLOG((NVDBG_INTR, nvc, NULL,
3012 	    "ck804_intr_process entered intr_status=%x", intr_status));
3013 
3014 	/*
3015 	 * For command completion interrupt, explicit clear is not required.
3016 	 * however, for the error cases explicit clear is performed.
3017 	 */
3018 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3019 
3020 		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
3021 
3022 		if ((port_mask[port] & intr_status) == 0) {
3023 			continue;
3024 		}
3025 
3026 		NVLOG((NVDBG_INTR, nvc, NULL,
3027 		    "ck804_intr_process interrupt on port %d", port));
3028 
3029 		nvp = &(nvc->nvc_port[port]);
3030 
3031 		mutex_enter(&nvp->nvp_mutex);
3032 
3033 		/*
3034 		 * there was a corner case found where an interrupt
3035 		 * arrived before nvp_slot was set.  Should
3036 		 * probably should track down why that happens and try
3037 		 * to eliminate that source and then get rid of this
3038 		 * check.
3039 		 */
3040 		if (nvp->nvp_slot == NULL) {
3041 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3042 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3043 			    "received before initialization "
3044 			    "completed status=%x", status));
3045 			mutex_exit(&nvp->nvp_mutex);
3046 
3047 			/*
3048 			 * clear interrupt bits
3049 			 */
3050 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3051 			    port_mask[port]);
3052 
3053 			continue;
3054 		}
3055 
3056 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
3057 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3058 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3059 			    " no command in progress status=%x", status));
3060 			mutex_exit(&nvp->nvp_mutex);
3061 
3062 			/*
3063 			 * clear interrupt bits
3064 			 */
3065 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3066 			    port_mask[port]);
3067 
3068 			continue;
3069 		}
3070 
3071 		bmhdl = nvp->nvp_bm_hdl;
3072 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3073 
3074 		if (!(bmstatus & BMISX_IDEINTS)) {
3075 			mutex_exit(&nvp->nvp_mutex);
3076 
3077 			continue;
3078 		}
3079 
3080 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3081 
3082 		if (status & SATA_STATUS_BSY) {
3083 			mutex_exit(&nvp->nvp_mutex);
3084 
3085 			continue;
3086 		}
3087 
3088 		nv_slotp = &(nvp->nvp_slot[0]);
3089 
3090 		ASSERT(nv_slotp);
3091 
3092 		spkt = nv_slotp->nvslot_spkt;
3093 
3094 		if (spkt == NULL) {
3095 			mutex_exit(&nvp->nvp_mutex);
3096 
3097 			continue;
3098 		}
3099 
3100 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3101 
3102 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3103 
3104 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3105 
3106 			nv_complete_io(nvp, spkt, 0);
3107 		}
3108 
3109 		mutex_exit(&nvp->nvp_mutex);
3110 	}
3111 
3112 	/*
3113 	 * ck804 often doesn't correctly distinguish hot add/remove
3114 	 * interrupts.  Frequently both the ADD and the REMOVE bits
3115 	 * are asserted, whether it was a remove or add.  Use sstatus
3116 	 * to distinguish hot add from hot remove.
3117 	 */
3118 
3119 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3120 		clear_bits = 0;
3121 
3122 		nvp = &(nvc->nvc_port[port]);
3123 		mutex_enter(&nvp->nvp_mutex);
3124 
3125 		if ((port_mask_pm[port] & intr_status) != 0) {
3126 			clear_bits = port_mask_pm[port];
3127 			NVLOG((NVDBG_HOT, nvc, nvp,
3128 			    "clearing PM interrupt bit: %x",
3129 			    intr_status & port_mask_pm[port]));
3130 		}
3131 
3132 		if ((port_mask_hot[port] & intr_status) == 0) {
3133 			if (clear_bits != 0) {
3134 				goto clear;
3135 			} else {
3136 				mutex_exit(&nvp->nvp_mutex);
3137 				continue;
3138 			}
3139 		}
3140 
3141 		/*
3142 		 * reaching here means there was a hot add or remove.
3143 		 */
3144 		clear_bits |= port_mask_hot[port];
3145 
3146 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
3147 
3148 		sstatus = nv_get32(bar5_hdl,
3149 		    nvc->nvc_port[port].nvp_sstatus);
3150 
3151 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
3152 		    SSTATUS_DET_DEVPRE_PHYCOM) {
3153 			nv_report_add_remove(nvp, 0);
3154 		} else {
3155 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3156 		}
3157 	clear:
3158 		/*
3159 		 * clear interrupt bits.  explicit interrupt clear is
3160 		 * required for hotplug interrupts.
3161 		 */
3162 		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
3163 
3164 		/*
3165 		 * make sure it's flushed and cleared.  If not try
3166 		 * again.  Sometimes it has been observed to not clear
3167 		 * on the first try.
3168 		 */
3169 		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3170 
3171 		/*
3172 		 * make 10 additional attempts to clear the interrupt
3173 		 */
3174 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
3175 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
3176 			    "still not clear try=%d", intr_status,
3177 			    ++nvcleared));
3178 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3179 			    clear_bits);
3180 			intr_status = nv_get8(bar5_hdl,
3181 			    nvc->nvc_ck804_int_status);
3182 		}
3183 
3184 		/*
3185 		 * if still not clear, log a message and disable the
3186 		 * port. highly unlikely that this path is taken, but it
3187 		 * gives protection against a wedged interrupt.
3188 		 */
3189 		if (intr_status & clear_bits) {
3190 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3191 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3192 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3193 			nvp->nvp_state |= NV_PORT_FAILED;
3194 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3195 			    B_TRUE);
3196 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
3197 			    "interrupt.  disabling port intr_status=%X",
3198 			    intr_status);
3199 		}
3200 
3201 		mutex_exit(&nvp->nvp_mutex);
3202 	}
3203 }
3204 
3205 
3206 /*
3207  * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
3208  * on the controller, to handle completion and hot plug and remove events.
3209  *
3210  */
static uint_t
mcp5x_intr_port(nv_port_t *nvp)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uint8_t clear = 0, intr_cycles = 0;
	int ret = DDI_INTR_UNCLAIMED;
	uint16_t int_status;
	clock_t intr_time;
	int loop_cnt = 0;

	/*
	 * Record entry time; the total time spent in this routine is
	 * tracked at the end of the function for debugging.
	 */
	nvp->intr_start_time = ddi_get_lbolt();

	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered"));

	do {
		/*
		 * read current interrupt status
		 */
		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);

		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));

		/*
		 * MCP5X_INT_IGNORE interrupts will show up in the status,
		 * but are masked out from causing an interrupt to be generated
		 * to the processor.  Ignore them here by masking them out.
		 */
		int_status &= ~(MCP5X_INT_IGNORE);

		/*
		 * exit the loop when no more interrupts to process
		 */
		if (int_status == 0) {

			break;
		}

		if (int_status & MCP5X_INT_COMPLETE) {
			NVLOG((NVDBG_INTR, nvc, nvp,
			    "mcp5x_packet_complete_intr"));
			/*
			 * since int_status was set, return DDI_INTR_CLAIMED
			 * from the DDI's perspective even though the packet
			 * completion may not have succeeded.  If it fails,
			 * need to manually clear the interrupt, otherwise
			 * clearing is implicit.
			 */
			ret = DDI_INTR_CLAIMED;
			if (mcp5x_packet_complete_intr(nvc, nvp) ==
			    NV_FAILURE) {
				/* defer clearing to the write below */
				clear |= MCP5X_INT_COMPLETE;
			} else {
				/*
				 * successful completion resets the stuck
				 * interrupt counter
				 */
				intr_cycles = 0;
			}
		}

		if (int_status & MCP5X_INT_DMA_SETUP) {
			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr"));

			/*
			 * Needs to be cleared before starting the BM, so do it
			 * now.  make sure this is still working.
			 */
			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
			    MCP5X_INT_DMA_SETUP);
#ifdef NCQ
			ret = mcp5x_dma_setup_intr(nvc, nvp);
#endif
		}

		if (int_status & MCP5X_INT_REM) {
			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device removed"));
			clear |= MCP5X_INT_REM;
			ret = DDI_INTR_CLAIMED;

			mutex_enter(&nvp->nvp_mutex);
			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
			mutex_exit(&nvp->nvp_mutex);

		} else if (int_status & MCP5X_INT_ADD) {
			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device added"));
			clear |= MCP5X_INT_ADD;
			ret = DDI_INTR_CLAIMED;

			mutex_enter(&nvp->nvp_mutex);
			nv_report_add_remove(nvp, 0);
			mutex_exit(&nvp->nvp_mutex);
		}
		/*
		 * Explicitly acknowledge the bits that need a manual clear
		 * (hotplug events and failed completions).
		 */
		if (clear) {
			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
			clear = 0;
		}
		/* Protect against a stuck interrupt */
		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
			    "processing.  Disabling port int_status=%X"
			    " clear=%X", int_status, clear);
			mutex_enter(&nvp->nvp_mutex);
			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
			nvp->nvp_state |= NV_PORT_FAILED;
			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
			    B_TRUE);
			mutex_exit(&nvp->nvp_mutex);
		}

	} while (loop_cnt++ < nv_max_intr_loops);

	/*
	 * Track the high-water mark of loop iterations for debugging.
	 */
	if (loop_cnt > nvp->intr_loop_cnt) {
		NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp,
		    "Exiting with multiple intr loop count %d", loop_cnt));
		nvp->intr_loop_cnt = loop_cnt;
	}

	/*
	 * Extra-verbose debug exit trace; only active when both INTR and
	 * VERBOSE debug flags are enabled.
	 */
	if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
	    (NVDBG_INTR | NVDBG_VERBOSE)) {
		uint8_t status, bmstatus;
		uint16_t int_status2;

		if (int_status & MCP5X_INT_COMPLETE) {
			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
			bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
			int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
			    nvp->nvp_mcp5x_int_status);
			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
			    "mcp55_intr_port: Exiting with altstatus %x, "
			    "bmicx %x, int_status2 %X, int_status %X, ret %x,"
			    " loop_cnt %d ", status, bmstatus, int_status2,
			    int_status, ret, loop_cnt));
		}
	}

	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret));

	/*
	 * To facilitate debugging, keep track of the length of time spent in
	 * the port interrupt routine.
	 */
	intr_time = ddi_get_lbolt() - nvp->intr_start_time;
	if (intr_time > nvp->intr_duration)
		nvp->intr_duration = intr_time;

	return (ret);
}
3357 
3358 
3359 /* ARGSUSED */
3360 static uint_t
3361 mcp5x_intr(caddr_t arg1, caddr_t arg2)
3362 {
3363 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3364 	int ret;
3365 
3366 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3367 		return (DDI_INTR_UNCLAIMED);
3368 
3369 	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3370 	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3371 
3372 	return (ret);
3373 }
3374 
3375 
3376 #ifdef NCQ
3377 /*
3378  * with software driven NCQ on mcp5x, an interrupt occurs right
3379  * before the drive is ready to do a DMA transfer.  At this point,
3380  * the PRD table needs to be programmed and the DMA engine enabled
3381  * and ready to go.
3382  *
3383  * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3384  * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3385  * -- clear bit 0 of master command reg
3386  * -- program PRD
3387  * -- clear the interrupt status bit for the DMA Setup FIS
3388  * -- set bit 0 of the bus master command register
3389  */
3390 static int
3391 mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3392 {
3393 	int slot;
3394 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3395 	uint8_t bmicx;
3396 	int port = nvp->nvp_port_num;
3397 	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3398 	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3399 
3400 	nv_cmn_err(CE_PANIC, nvc, nvp,
3401 	    "this is should not be executed at all until NCQ");
3402 
3403 	mutex_enter(&nvp->nvp_mutex);
3404 
3405 	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3406 
3407 	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3408 
3409 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3410 	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
3411 
3412 	/*
3413 	 * halt the DMA engine.  This step is necessary according to
3414 	 * the mcp5x spec, probably since there may have been a "first" packet
3415 	 * that already programmed the DMA engine, but may not turn out to
3416 	 * be the first one processed.
3417 	 */
3418 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3419 
3420 	if (bmicx & BMICX_SSBM) {
3421 		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3422 		    "another packet.  Cancelling and reprogramming"));
3423 		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3424 	}
3425 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3426 
3427 	nv_start_dma_engine(nvp, slot);
3428 
3429 	mutex_exit(&nvp->nvp_mutex);
3430 
3431 	return (DDI_INTR_CLAIMED);
3432 }
3433 #endif /* NCQ */
3434 
3435 
3436 /*
3437  * packet completion interrupt.  If the packet is complete, invoke
3438  * the packet completion callback.
3439  */
static int
mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
{
	uint8_t status, bmstatus;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	int sactive;
	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
	sata_pkt_t *spkt;
	nv_slot_t *nv_slotp;

	mutex_enter(&nvp->nvp_mutex);

	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);

	/*
	 * If neither the interrupt latch nor the error latch is set in the
	 * bus master status, this was not a completion for this port; let
	 * the caller handle clearing the controller-level interrupt.
	 */
	if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
		mutex_exit(&nvp->nvp_mutex);

		return (NV_FAILURE);
	}

	/*
	 * Commands may have been processed by abort or timeout before
	 * interrupt processing acquired the mutex. So we may be processing
	 * an interrupt for packets that were already removed.
	 * For functioning NCQ processing all slots may be checked, but
	 * with NCQ disabled (current code), relying on *_run flags is OK.
	 */
	if (nvp->nvp_non_ncq_run) {
		/*
		 * If the just completed item is a non-ncq command, the busy
		 * bit should not be set
		 */
		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
		if (status & SATA_STATUS_BSY) {
			nv_cmn_err(CE_WARN, nvc, nvp,
			    "unexpected SATA_STATUS_BSY set");
			mutex_exit(&nvp->nvp_mutex);
			/*
			 * calling function will clear interrupt.  then
			 * the real interrupt will either arrive or the
			 * packet timeout handling will take over and
			 * reset.
			 */
			return (NV_FAILURE);
		}
		ASSERT(nvp->nvp_ncq_run == 0);
	} else {
		ASSERT(nvp->nvp_non_ncq_run == 0);
		/*
		 * Pre-NCQ code!
		 * Nothing to do. The packet for the command that just
		 * completed is already gone. Just clear the interrupt.
		 */
		(void) nv_bm_status_clear(nvp);
		(void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
		mutex_exit(&nvp->nvp_mutex);
		return (NV_SUCCESS);

		/*
		 * NCQ check for BSY here and wait if still bsy before
		 * continuing. Rather than wait for it to be cleared
		 * when starting a packet and wasting CPU time, the starting
		 * thread can exit immediately, but might have to spin here
		 * for a bit possibly.  Needs more work and experimentation.
		 *
		 */
	}

	/*
	 * active_pkt_bit will represent the bitmap of the single completed
	 * packet.  Because of the nature of sw assisted NCQ, only one
	 * command will complete per interrupt.
	 */

	/*
	 * NOTE(review): ncq_command is initialized to B_FALSE and never set
	 * in this function, so only the first branch runs today; the else
	 * branch is retained for future NCQ support.
	 */
	if (ncq_command == B_FALSE) {
		active_pkt = 0;
	} else {
		/*
		 * NCQ: determine which command just completed, by examining
		 * which bit cleared in the register since last written.
		 */
		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);

		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;

		ASSERT(active_pkt_bit);


		/*
		 * this failure path needs more work to handle the
		 * error condition and recovery.
		 */
		if (active_pkt_bit == 0) {
			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;

			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
			    "nvp->nvp_sactive %X", sactive,
			    nvp->nvp_sactive_cache);

			/* read status to acknowledge the interrupt */
			(void) nv_get8(cmdhdl, nvp->nvp_status);

			mutex_exit(&nvp->nvp_mutex);

			return (NV_FAILURE);
		}

		/*
		 * find the index of the lowest set bit, i.e. the slot
		 * number of the command that just completed.
		 */
		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
		    active_pkt++, active_pkt_bit >>= 1) {
		}

		/*
		 * make sure only one bit is ever turned on
		 */
		ASSERT(active_pkt_bit == 1);

		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
	}

	nv_slotp = &(nvp->nvp_slot[active_pkt]);

	spkt = nv_slotp->nvslot_spkt;

	ASSERT(spkt != NULL);

	/*
	 * invoke the slot-specific interrupt handler; it updates
	 * nvslot_flags when the packet has finished.
	 */
	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);

	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);

	if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {

		nv_complete_io(nvp, spkt, active_pkt);
	}

	mutex_exit(&nvp->nvp_mutex);

	return (NV_SUCCESS);
}
3578 
3579 
3580 static void
3581 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3582 {
3583 
3584 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3585 
3586 	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3587 		nvp->nvp_ncq_run--;
3588 	} else {
3589 		nvp->nvp_non_ncq_run--;
3590 	}
3591 
3592 	/*
3593 	 * mark the packet slot idle so it can be reused.  Do this before
3594 	 * calling satapkt_comp so the slot can be reused.
3595 	 */
3596 	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3597 
3598 	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3599 		/*
3600 		 * If this is not timed polled mode cmd, which has an
3601 		 * active thread monitoring for completion, then need
3602 		 * to signal the sleeping thread that the cmd is complete.
3603 		 */
3604 		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3605 			cv_signal(&nvp->nvp_poll_cv);
3606 		}
3607 
3608 		return;
3609 	}
3610 
3611 	if (spkt->satapkt_comp != NULL) {
3612 		mutex_exit(&nvp->nvp_mutex);
3613 		(*spkt->satapkt_comp)(spkt);
3614 		mutex_enter(&nvp->nvp_mutex);
3615 	}
3616 }
3617 
3618 
3619 /*
3620  * check whether packet is ncq command or not.  for ncq command,
3621  * start it if there is still room on queue.  for non-ncq command only
3622  * start if no other command is running.
3623  */
3624 static int
3625 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3626 {
3627 	uint8_t cmd, ncq;
3628 
3629 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3630 
3631 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3632 
3633 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3634 	    (cmd == SATAC_READ_FPDMA_QUEUED));
3635 
3636 	if (ncq == B_FALSE) {
3637 
3638 		if ((nvp->nvp_non_ncq_run == 1) ||
3639 		    (nvp->nvp_ncq_run > 0)) {
3640 			/*
3641 			 * next command is non-ncq which can't run
3642 			 * concurrently.  exit and return queue full.
3643 			 */
3644 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3645 
3646 			return (SATA_TRAN_QUEUE_FULL);
3647 		}
3648 
3649 		return (nv_start_common(nvp, spkt));
3650 	}
3651 
3652 	/*
3653 	 * ncq == B_TRUE
3654 	 */
3655 	if (nvp->nvp_non_ncq_run == 1) {
3656 		/*
3657 		 * cannot start any NCQ commands when there
3658 		 * is a non-NCQ command running.
3659 		 */
3660 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3661 
3662 		return (SATA_TRAN_QUEUE_FULL);
3663 	}
3664 
3665 #ifdef NCQ
3666 	/*
3667 	 * this is not compiled for now as satapkt_device.satadev_qdepth
3668 	 * is being pulled out until NCQ support is later addressed
3669 	 *
3670 	 * nvp_queue_depth is initialized by the first NCQ command
3671 	 * received.
3672 	 */
3673 	if (nvp->nvp_queue_depth == 1) {
3674 		nvp->nvp_queue_depth =
3675 		    spkt->satapkt_device.satadev_qdepth;
3676 
3677 		ASSERT(nvp->nvp_queue_depth > 1);
3678 
3679 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3680 		    "nv_process_queue: nvp_queue_depth set to %d",
3681 		    nvp->nvp_queue_depth));
3682 	}
3683 #endif
3684 
3685 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3686 		/*
3687 		 * max number of NCQ commands already active
3688 		 */
3689 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3690 
3691 		return (SATA_TRAN_QUEUE_FULL);
3692 	}
3693 
3694 	return (nv_start_common(nvp, spkt));
3695 }
3696 
3697 
3698 /*
3699  * configure INTx and legacy interrupts
3700  */
3701 static int
3702 nv_add_legacy_intrs(nv_ctl_t *nvc)
3703 {
3704 	dev_info_t	*devinfo = nvc->nvc_dip;
3705 	int		actual, count = 0;
3706 	int		x, y, rc, inum = 0;
3707 
3708 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3709 
3710 	/*
3711 	 * get number of interrupts
3712 	 */
3713 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3714 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3715 		NVLOG((NVDBG_INTR, nvc, NULL,
3716 		    "ddi_intr_get_nintrs() failed, "
3717 		    "rc %d count %d", rc, count));
3718 
3719 		return (DDI_FAILURE);
3720 	}
3721 
3722 	/*
3723 	 * allocate an array of interrupt handles
3724 	 */
3725 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3726 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3727 
3728 	/*
3729 	 * call ddi_intr_alloc()
3730 	 */
3731 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3732 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3733 
3734 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3735 		nv_cmn_err(CE_WARN, nvc, NULL,
3736 		    "ddi_intr_alloc() failed, rc %d", rc);
3737 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3738 
3739 		return (DDI_FAILURE);
3740 	}
3741 
3742 	if (actual < count) {
3743 		nv_cmn_err(CE_WARN, nvc, NULL,
3744 		    "ddi_intr_alloc: requested: %d, received: %d",
3745 		    count, actual);
3746 
3747 		goto failure;
3748 	}
3749 
3750 	nvc->nvc_intr_cnt = actual;
3751 
3752 	/*
3753 	 * get intr priority
3754 	 */
3755 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3756 	    DDI_SUCCESS) {
3757 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3758 
3759 		goto failure;
3760 	}
3761 
3762 	/*
3763 	 * Test for high level mutex
3764 	 */
3765 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3766 		nv_cmn_err(CE_WARN, nvc, NULL,
3767 		    "nv_add_legacy_intrs: high level intr not supported");
3768 
3769 		goto failure;
3770 	}
3771 
3772 	for (x = 0; x < actual; x++) {
3773 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3774 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3775 			nv_cmn_err(CE_WARN, nvc, NULL,
3776 			    "ddi_intr_add_handler() failed");
3777 
3778 			goto failure;
3779 		}
3780 	}
3781 
3782 	/*
3783 	 * call ddi_intr_enable() for legacy interrupts
3784 	 */
3785 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3786 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3787 	}
3788 
3789 	return (DDI_SUCCESS);
3790 
3791 	failure:
3792 	/*
3793 	 * free allocated intr and nvc_htable
3794 	 */
3795 	for (y = 0; y < actual; y++) {
3796 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3797 	}
3798 
3799 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3800 
3801 	return (DDI_FAILURE);
3802 }
3803 
3804 #ifdef	NV_MSI_SUPPORTED
3805 /*
3806  * configure MSI interrupts
3807  */
3808 static int
3809 nv_add_msi_intrs(nv_ctl_t *nvc)
3810 {
3811 	dev_info_t	*devinfo = nvc->nvc_dip;
3812 	int		count, avail, actual;
3813 	int		x, y, rc, inum = 0;
3814 
3815 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3816 
3817 	/*
3818 	 * get number of interrupts
3819 	 */
3820 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3821 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3822 		nv_cmn_err(CE_WARN, nvc, NULL,
3823 		    "ddi_intr_get_nintrs() failed, "
3824 		    "rc %d count %d", rc, count);
3825 
3826 		return (DDI_FAILURE);
3827 	}
3828 
3829 	/*
3830 	 * get number of available interrupts
3831 	 */
3832 	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3833 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3834 		nv_cmn_err(CE_WARN, nvc, NULL,
3835 		    "ddi_intr_get_navail() failed, "
3836 		    "rc %d avail %d", rc, avail);
3837 
3838 		return (DDI_FAILURE);
3839 	}
3840 
3841 	if (avail < count) {
3842 		nv_cmn_err(CE_WARN, nvc, NULL,
3843 		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
3844 		    avail, count);
3845 	}
3846 
3847 	/*
3848 	 * allocate an array of interrupt handles
3849 	 */
3850 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3851 	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3852 
3853 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3854 	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3855 
3856 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3857 		nv_cmn_err(CE_WARN, nvc, NULL,
3858 		    "ddi_intr_alloc() failed, rc %d", rc);
3859 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3860 
3861 		return (DDI_FAILURE);
3862 	}
3863 
3864 	/*
3865 	 * Use interrupt count returned or abort?
3866 	 */
3867 	if (actual < count) {
3868 		NVLOG((NVDBG_INIT, nvc, NULL,
3869 		    "Requested: %d, Received: %d", count, actual));
3870 	}
3871 
3872 	nvc->nvc_intr_cnt = actual;
3873 
3874 	/*
3875 	 * get priority for first msi, assume remaining are all the same
3876 	 */
3877 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3878 	    DDI_SUCCESS) {
3879 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3880 
3881 		goto failure;
3882 	}
3883 
3884 	/*
3885 	 * test for high level mutex
3886 	 */
3887 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3888 		nv_cmn_err(CE_WARN, nvc, NULL,
3889 		    "nv_add_msi_intrs: high level intr not supported");
3890 
3891 		goto failure;
3892 	}
3893 
3894 	/*
3895 	 * Call ddi_intr_add_handler()
3896 	 */
3897 	for (x = 0; x < actual; x++) {
3898 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3899 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3900 			nv_cmn_err(CE_WARN, nvc, NULL,
3901 			    "ddi_intr_add_handler() failed");
3902 
3903 			goto failure;
3904 		}
3905 	}
3906 
3907 	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3908 
3909 	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3910 		(void) ddi_intr_block_enable(nvc->nvc_htable,
3911 		    nvc->nvc_intr_cnt);
3912 	} else {
3913 		/*
3914 		 * Call ddi_intr_enable() for MSI non block enable
3915 		 */
3916 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3917 			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3918 		}
3919 	}
3920 
3921 	return (DDI_SUCCESS);
3922 
3923 	failure:
3924 	/*
3925 	 * free allocated intr and nvc_htable
3926 	 */
3927 	for (y = 0; y < actual; y++) {
3928 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3929 	}
3930 
3931 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3932 
3933 	return (DDI_FAILURE);
3934 }
3935 #endif
3936 
3937 
3938 static void
3939 nv_rem_intrs(nv_ctl_t *nvc)
3940 {
3941 	int x, i;
3942 	nv_port_t *nvp;
3943 
3944 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3945 
3946 	/*
3947 	 * prevent controller from generating interrupts by
3948 	 * masking them out.  This is an extra precaution.
3949 	 */
3950 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3951 		nvp = (&nvc->nvc_port[i]);
3952 		mutex_enter(&nvp->nvp_mutex);
3953 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3954 		mutex_exit(&nvp->nvp_mutex);
3955 	}
3956 
3957 	/*
3958 	 * disable all interrupts
3959 	 */
3960 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3961 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3962 		(void) ddi_intr_block_disable(nvc->nvc_htable,
3963 		    nvc->nvc_intr_cnt);
3964 	} else {
3965 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3966 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3967 		}
3968 	}
3969 
3970 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3971 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3972 		(void) ddi_intr_free(nvc->nvc_htable[x]);
3973 	}
3974 
3975 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3976 }
3977 
3978 
3979 /*
3980  * variable argument wrapper for cmn_err.  prefixes the instance and port
3981  * number if possible
3982  */
3983 static void
3984 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3985 {
3986 	char port[NV_STRING_10];
3987 	char inst[NV_STRING_10];
3988 
3989 	mutex_enter(&nv_log_mutex);
3990 
3991 	if (nvc) {
3992 		(void) snprintf(inst, NV_STRING_10, "inst %d",
3993 		    ddi_get_instance(nvc->nvc_dip));
3994 	} else {
3995 		inst[0] = '\0';
3996 	}
3997 
3998 	if (nvp) {
3999 		(void) sprintf(port, " port %d", nvp->nvp_port_num);
4000 	} else {
4001 		port[0] = '\0';
4002 	}
4003 
4004 	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
4005 	    (inst[0]|port[0] ? ": " :""));
4006 
4007 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4008 	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
4009 
4010 	/*
4011 	 * normally set to log to console but in some debug situations it
4012 	 * may be useful to log only to a file.
4013 	 */
4014 	if (nv_log_to_console) {
4015 		if (nv_prom_print) {
4016 			prom_printf("%s\n", nv_log_buf);
4017 		} else {
4018 			cmn_err(ce, "%s", nv_log_buf);
4019 		}
4020 
4021 
4022 	} else {
4023 		cmn_err(ce, "!%s", nv_log_buf);
4024 	}
4025 
4026 	mutex_exit(&nv_log_mutex);
4027 }
4028 
4029 
4030 /*
4031  * wrapper for cmn_err
4032  */
static void
nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
{
	va_list ap;

	/*
	 * forward to nv_vcmn_err(), which prefixes the instance and
	 * port number when they are available.
	 */
	va_start(ap, fmt);
	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
	va_end(ap);
}
4042 
4043 
4044 #if defined(DEBUG)
4045 /*
4046  * prefixes the instance and port number if possible to the debug message
4047  */
4048 static void
4049 nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4050 {
4051 	va_list ap;
4052 
4053 	if ((nv_debug_flags & flag) == 0) {
4054 		return;
4055 	}
4056 
4057 	va_start(ap, fmt);
4058 	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
4059 	va_end(ap);
4060 
4061 	/*
4062 	 * useful for some debugging situations
4063 	 */
4064 	if (nv_log_delay) {
4065 		drv_usecwait(nv_log_delay);
4066 	}
4067 
4068 }
4069 #endif /* DEBUG */
4070 
4071 
4072 /*
4073  * program registers which are common to all commands
4074  */
static void
nv_program_taskfile_regs(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	sata_pkt_t *spkt;
	sata_cmd_t *satacmd;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	uint8_t cmd, ncq = B_FALSE;

	spkt = nv_slotp->nvslot_spkt;
	satacmd = &spkt->satapkt_cmd;
	cmd = satacmd->satacmd_cmd_reg;

	ASSERT(nvp->nvp_slot);

	/*
	 * FPDMA queued (NCQ) reads/writes use the 48-bit layout with the
	 * tag in the count register; see the ATA_ADDR_LBA48 case below.
	 */
	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
		ncq = B_TRUE;
	}

	/*
	 * select the drive
	 */
	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);

	/*
	 * make certain the drive selected.  If it never becomes ready the
	 * taskfile is left unprogrammed and we return; the command start
	 * will subsequently fail or time out.
	 */
	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
	    NV_SEC2USEC(5), 0) == B_FALSE) {

		return;
	}

	switch (spkt->satapkt_cmd.satacmd_addr_type) {

	case ATA_ADDR_LBA:
		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));

		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);

		break;

	case ATA_ADDR_LBA28:
		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
		    "ATA_ADDR_LBA28 mode"));
		/*
		 * NCQ only uses 48-bit addressing
		 */
		ASSERT(ncq != B_TRUE);

		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);

		break;

	case ATA_ADDR_LBA48:
		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
		    "ATA_ADDR_LBA48 mode"));

		/*
		 * for NCQ, tag goes into count register and real sector count
		 * into features register.  The sata module does the translation
		 * in the satacmd.
		 */
		if (ncq == B_TRUE) {
			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
			/* two writes: extended (high) byte first, then low */
			nv_put8(cmdhdl, nvp->nvp_feature,
			    satacmd->satacmd_features_reg_ext);
			nv_put8(cmdhdl, nvp->nvp_feature,
			    satacmd->satacmd_features_reg);
		} else {
			nv_put8(cmdhdl, nvp->nvp_count,
			    satacmd->satacmd_sec_count_msb);
			nv_put8(cmdhdl, nvp->nvp_count,
			    satacmd->satacmd_sec_count_lsb);
		}

		/*
		 * send the high-order half first
		 */
		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
		/*
		 * Send the low-order half
		 */
		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);

		break;

	case 0:
		/*
		 * non-media access commands such as identify and features
		 * take this path.
		 */
		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
		nv_put8(cmdhdl, nvp->nvp_feature,
		    satacmd->satacmd_features_reg);
		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);

		break;

	default:
		/* unknown address type: program nothing beyond drive select */
		break;
	}

	ASSERT(nvp->nvp_slot);
}
4193 
4194 
4195 /*
4196  * start a command that involves no media access
4197  */
4198 static int
4199 nv_start_nodata(nv_port_t *nvp, int slot)
4200 {
4201 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4202 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4203 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4204 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4205 
4206 	nv_program_taskfile_regs(nvp, slot);
4207 
4208 	/*
4209 	 * This next one sets the controller in motion
4210 	 */
4211 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4212 
4213 	return (SATA_TRAN_ACCEPTED);
4214 }
4215 
4216 
4217 static int
4218 nv_bm_status_clear(nv_port_t *nvp)
4219 {
4220 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4221 	uchar_t	status, ret;
4222 
4223 	/*
4224 	 * Get the current BM status
4225 	 */
4226 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4227 
4228 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4229 
4230 	/*
4231 	 * Clear the latches (and preserve the other bits)
4232 	 */
4233 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
4234 
4235 	return (ret);
4236 }
4237 
4238 
4239 /*
4240  * program the bus master DMA engine with the PRD address for
4241  * the active slot command, and start the DMA engine.
4242  */
4243 static void
4244 nv_start_dma_engine(nv_port_t *nvp, int slot)
4245 {
4246 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4247 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4248 	uchar_t direction;
4249 
4250 	ASSERT(nv_slotp->nvslot_spkt != NULL);
4251 
4252 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4253 	    == SATA_DIR_READ) {
4254 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
4255 	} else {
4256 		direction = BMICX_RWCON_READ_FROM_MEMORY;
4257 	}
4258 
4259 	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4260 	    "nv_start_dma_engine entered"));
4261 
4262 #if NOT_USED
4263 	/*
4264 	 * NOT NEEDED. Left here of historical reason.
4265 	 * Reset the controller's interrupt and error status bits.
4266 	 */
4267 	(void) nv_bm_status_clear(nvp);
4268 #endif
4269 	/*
4270 	 * program the PRD table physical start address
4271 	 */
4272 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4273 
4274 	/*
4275 	 * set the direction control and start the DMA controller
4276 	 */
4277 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4278 }
4279 
/*
 * start a DMA data transfer command, either in or out.  Builds the PRD
 * (Physical Region Descriptor) table for the slot from the DMA cookie
 * list supplied by the sata framework, then starts the bus master
 * engine.  Returns NV_FAILURE if the cookie list does not fit in the
 * PRD table, otherwise SATA_TRAN_ACCEPTED.
 */
static int
nv_start_dma(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
#ifdef NCQ
	uint8_t ncq = B_FALSE;
#endif
	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;

	ASSERT(sg_count != 0);

	/*
	 * The PRD table only has room for NV_DMA_NSEGS entries; fail the
	 * command rather than overrun it.
	 */
	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
		    sata_cmdp->satacmd_num_dma_cookies);

		return (NV_FAILURE);
	}

	nv_program_taskfile_regs(nvp, slot);

	/*
	 * start the drive in motion
	 */
	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);

	/*
	 * the drive starts processing the transaction when the cmd register
	 * is written.  This is done here before programming the DMA engine to
	 * parallelize and save some time.  In the event that the drive is ready
	 * before DMA, it will wait.
	 */
#ifdef NCQ
	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
		ncq = B_TRUE;
	}
#endif

	/*
	 * copy the PRD list to PRD table in DMA accessible memory
	 * so that the controller can access it.
	 */
	for (idx = 0; idx < sg_count; idx++, srcp++) {
		uint32_t size;

		nv_put32(sghdl, dstp++, srcp->dmac_address);

		/* Set the number of bytes to transfer, 0 implies 64KB */
		size = srcp->dmac_size;
		if (size == 0x10000)
			size = 0;

		/*
		 * If this is a 40-bit address, copy bits 32-40 of the
		 * physical address to bits 16-24 of the PRD count.
		 */
		if (srcp->dmac_laddress > UINT32_MAX) {
			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
		}

		/*
		 * set the end of table flag for the last entry
		 */
		if (idx == (sg_count - 1)) {
			size |= PRDE_EOT;
		}

		nv_put32(sghdl, dstp++, size);
	}

	/*
	 * flush the PRD table out to memory before the controller reads it
	 */
	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);

	nv_start_dma_engine(nvp, slot);

#ifdef NCQ
	/*
	 * optimization:  for SWNCQ, start DMA engine if this is the only
	 * command running.  Preliminary NCQ efforts indicated this needs
	 * more debugging.
	 *
	 * if (nvp->nvp_ncq_run <= 1)
	 *
	 * NOTE(review): the engine was already started unconditionally
	 * above; if NCQ is ever enabled, this block would start it a
	 * second time for non-NCQ commands -- confirm before enabling.
	 */

	if (ncq == B_FALSE) {
		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
		    " cmd = %X", non_ncq_commands++, cmd));
		nv_start_dma_engine(nvp, slot);
	} else {
		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
	}
#endif /* NCQ */

	return (SATA_TRAN_ACCEPTED);
}
4388 
4389 
4390 /*
4391  * start a PIO data-in ATA command
4392  */
4393 static int
4394 nv_start_pio_in(nv_port_t *nvp, int slot)
4395 {
4396 
4397 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4398 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4399 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4400 
4401 	nv_program_taskfile_regs(nvp, slot);
4402 
4403 	/*
4404 	 * This next one sets the drive in motion
4405 	 */
4406 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4407 
4408 	return (SATA_TRAN_ACCEPTED);
4409 }
4410 
4411 
/*
 * start a PIO data-out ATA command.  Unlike data-in, the first chunk
 * of data must be sent from here, because the drive does not generate
 * an interrupt for it; subsequent chunks are sent by nv_intr_pio_out().
 * On failure the packet is completed and the port is marked for reset.
 */
static int
nv_start_pio_out(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;

	nv_program_taskfile_regs(nvp, slot);

	/*
	 * this next one sets the drive in motion
	 */
	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);

	/*
	 * wait for the busy bit to settle
	 */
	NV_DELAY_NSEC(400);

	/*
	 * wait for the drive to assert DRQ to send the first chunk
	 * of data. Have to busy wait because there's no interrupt for
	 * the first chunk. This is bad... uses a lot of cycles if the
	 * drive responds too slowly or if the wait loop granularity
	 * is too large. It's even worse if the drive is defective and
	 * the loop times out.
	 */
	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;

		goto error;
	}

	/*
	 * send the first block.
	 */
	nv_intr_pio_out(nvp, nv_slotp);

	/*
	 * If nvslot_flags is not set to COMPLETE yet, then processing
	 * is OK so far, so return.  Otherwise, fall into error handling
	 * below.
	 */
	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {

		return (SATA_TRAN_ACCEPTED);
	}

	error:
	/*
	 * there was an error so reset the device and complete the packet.
	 */
	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
	nv_complete_io(nvp, spkt, 0);
	nvp->nvp_state |= NV_PORT_RESET;
	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
	nv_reset(nvp);

	return (SATA_TRAN_PORT_ERROR);
}
4478 
4479 
/*
 * start a ATAPI Packet command (PIO data in or out).  The PACKET
 * command is issued here; once the device asserts DRQ, the command
 * data block (acdb) is written to the data register.  The remainder
 * of the transfer is handled in nv_intr_pkt_pio().
 */
static int
nv_start_pkt_pio(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	sata_cmd_t *satacmd = &spkt->satapkt_cmd;

	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_pkt_pio: start"));

	/*
	 * Write the PACKET command to the command register.  Normally
	 * this would be done through nv_program_taskfile_regs().  It
	 * is done here because some values need to be overridden.
	 */

	/* select the drive */
	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);

	/* make certain the drive selected */
	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
	    NV_SEC2USEC(5), 0) == B_FALSE) {
		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_start_pkt_pio: drive select failed"));
		/*
		 * NOTE(review): unlike the HP0 failure path below, the
		 * packet is not completed here -- confirm the caller
		 * handles SATA_TRAN_PORT_ERROR for an unstarted packet.
		 */
		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * The command is always sent via PIO, despite whatever the SATA
	 * framework sets in the command.  Overwrite the DMA bit to do this.
	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
	 */
	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */

	/* set appropriately by the sata framework */
	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);

	/* initiate the command by writing the command register last */
	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);

	/* Give the host controller time to do its thing */
	NV_DELAY_NSEC(400);

	/*
	 * Wait for the device to indicate that it is ready for the command
	 * ATAPI protocol state - HP0: Check_Status_A
	 */

	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		/*
		 * Either an error or device fault occurred or the wait
		 * timed out.  According to the ATAPI protocol, command
		 * completion is also possible.  Other implementations of
		 * this protocol don't handle this last case, so neither
		 * does this code.
		 */

		if (nv_get8(cmdhdl, nvp->nvp_status) &
		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_pkt_pio: device error (HP0)"));
		} else {
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_pkt_pio: timeout (HP0)"));
		}

		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_complete_io(nvp, spkt, 0);
		nvp->nvp_state |= NV_PORT_RESET;
		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
		nv_reset(nvp);

		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * Put the ATAPI command in the data register
	 * ATAPI protocol state - HP1: Send_Packet
	 */

	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
	    (ushort_t *)nvp->nvp_data,
	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);

	/*
	 * See you in nv_intr_pkt_pio.
	 * ATAPI protocol state - HP3: INTRQ_wait
	 */

	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_pkt_pio: exiting into HP3"));

	return (SATA_TRAN_ACCEPTED);
}
4588 
4589 
4590 /*
4591  * Interrupt processing for a non-data ATA command.
4592  */
4593 static void
4594 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4595 {
4596 	uchar_t status;
4597 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4598 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4599 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4600 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4601 
4602 	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4603 
4604 	status = nv_get8(cmdhdl, nvp->nvp_status);
4605 
4606 	/*
4607 	 * check for errors
4608 	 */
4609 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4610 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4611 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4612 		    nvp->nvp_altstatus);
4613 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4614 	} else {
4615 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4616 	}
4617 
4618 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4619 }
4620 
4621 
/*
 * ATA command, PIO data in.  Called on each interrupt to read the next
 * chunk of data from the drive into the caller's buffer; the packet is
 * completed once the remaining byte count reaches zero.
 */
static void
nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t	status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	/*
	 * read the status register, which also clears the IRQ
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * drive should not still be busy at interrupt time; treat it as
	 * a timeout and reset the port.
	 */
	if (status & SATA_STATUS_BSY) {
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nvp->nvp_state |= NV_PORT_RESET;
		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
		nv_reset(nvp);

		return;
	}

	/*
	 * check for errors: DRQ must be set and DF/ERR must be clear
	 */
	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * read the next chunk of data (if any)
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * read count bytes
	 */
	ASSERT(count != 0);

	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;


	if (nv_slotp->nvslot_byte_count != 0) {
		/*
		 * more to transfer.  Wait for next interrupt.
		 */
		return;
	}

	/*
	 * transfer is complete. wait for the busy bit to settle.
	 */
	NV_DELAY_NSEC(400);

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
}
4694 
4695 
/*
 * ATA command PIO data out.  Called on each interrupt to write the
 * next chunk of data to the drive; the first chunk is sent directly
 * from nv_start_pio_out() since no interrupt is generated for it.
 */
static void
nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	uchar_t status;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	/*
	 * clear the IRQ
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	if (status & SATA_STATUS_BSY) {
		/*
		 * this should not happen
		 */
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);

		return;
	}

	/*
	 * check for errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * this is the condition which signals the drive is
	 * no longer ready to transfer.  Likely that the transfer
	 * completed successfully, but check that byte_count is
	 * zero.
	 */
	if ((status & SATA_STATUS_DRQ) == 0) {

		if (nv_slotp->nvslot_byte_count == 0) {
			/*
			 * complete; successful transfer
			 */
			spkt->satapkt_reason = SATA_PKT_COMPLETED;
		} else {
			/*
			 * error condition, incomplete transfer
			 */
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		}
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		return;
	}

	/*
	 * write the next chunk of data
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * read or write count bytes
	 */

	ASSERT(count != 0);

	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;
}
4780 
4781 
/*
 * ATAPI PACKET command, PIO in/out interrupt
 *
 * Under normal circumstances, one of four different interrupt scenarios
 * will result in this function being called:
 *
 * 1. Packet command data transfer
 * 2. Packet command completion
 * 3. Request sense data transfer
 * 4. Request sense command completion
 *
 * The HP* labels below refer to the ATAPI protocol states named in the
 * per-branch comments (e.g. HP2: Check_Status_B, HP4: Transfer_Data).
 */
static void
nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t	status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	uint16_t ctlr_count;
	int count;

	/* ATAPI protocol state - HP2: Check_Status_B */

	status = nv_get8(cmdhdl, nvp->nvp_status);
	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_intr_pkt_pio: status 0x%x", status));

	if (status & SATA_STATUS_BSY) {
		/*
		 * drive still busy: fail the packet.  The port is only
		 * reset when this was not already a request sense attempt.
		 */
		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		} else {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nvp->nvp_state |= NV_PORT_RESET;
			nvp->nvp_state &= ~(NV_PORT_RESTORE |
			    NV_PORT_RESET_RETRY);
			nv_reset(nvp);
		}

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: busy - status 0x%x", status));

		return;
	}

	if ((status & SATA_STATUS_DF) != 0) {
		/*
		 * On device fault, just clean up and bail.  Request sense
		 * will just default to its NO SENSE initialized value.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
		    nvp->nvp_error);

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: device fault"));

		return;
	}

	if ((status & SATA_STATUS_ERR) != 0) {
		/*
		 * On command error, figure out whether we are processing a
		 * request sense.  If so, clean up and bail.  Otherwise,
		 * do a REQUEST SENSE.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
			    NV_FAILURE) {
				nv_copy_registers(nvp, &spkt->satapkt_device,
				    spkt);
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
			}

			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
			    nvp->nvp_altstatus);
			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
			    nvp->nvp_error);
		} else {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: error (status 0x%x)", status));

		return;
	}

	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
		/*
		 * REQUEST SENSE command processing
		 */

		if ((status & (SATA_STATUS_DRQ)) != 0) {
			/* ATAPI state - HP4: Transfer_Data */

			/* read the byte count from the controller */
			ctlr_count =
			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: ctlr byte count - %d",
			    ctlr_count));

			if (ctlr_count == 0) {
				/* no data to transfer - some devices do this */

				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: done (no data)"));

				return;
			}

			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);

			/* transfer the data */
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			/* consume residual bytes */
			ctlr_count -= count;

			if (ctlr_count > 0) {
				for (; ctlr_count > 0; ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);
			}

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: transition to HP2"));
		} else {
			/* still in ATAPI state - HP2 */

			/*
			 * In order to avoid clobbering the rqsense data
			 * set by the SATA framework, the sense data read
			 * from the device is put in a separate buffer and
			 * copied into the packet after the request sense
			 * command successfully completes.
			 */
			bcopy(nv_slotp->nvslot_rqsense_buff,
			    spkt->satapkt_cmd.satacmd_rqsense,
			    SATA_ATAPI_RQSENSE_LEN);

			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: request sense done"));
		}

		return;
	}

	/*
	 * Normal command processing
	 */

	if ((status & (SATA_STATUS_DRQ)) != 0) {
		/* ATAPI protocol state - HP4: Transfer_Data */

		/* read the byte count from the controller */
		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

		if (ctlr_count == 0) {
			/* no data to transfer - some devices do this */

			spkt->satapkt_reason = SATA_PKT_COMPLETED;
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: done (no data)"));

			return;
		}

		count = min(ctlr_count, nv_slotp->nvslot_byte_count);

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: byte_count 0x%x",
		    nv_slotp->nvslot_byte_count));

		/* transfer the data */

		if (direction == SATA_DIR_READ) {
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			ctlr_count -= count;

			if (ctlr_count > 0) {
				/* consume remaining bytes */

				for (; ctlr_count > 0;
				    ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);

				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: bytes remained"));
			}
		} else {
			ddi_rep_put16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);
		}

		nv_slotp->nvslot_v_addr += count;
		nv_slotp->nvslot_byte_count -= count;

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: transition to HP2"));
	} else {
		/* still in ATAPI state - HP2 */

		spkt->satapkt_reason = SATA_PKT_COMPLETED;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: done"));
	}
}
5035 
5036 
/*
 * ATA command, DMA data in/out.  Stops the bus master engine, checks
 * the drive and bus master status, and completes the packet
 * accordingly; a bus master error triggers a port reset.
 */
static void
nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uchar_t	bmicx;
	uchar_t bm_status;

	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

	/*
	 * stop DMA engine.
	 */
	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);

	/*
	 * get the status and clear the IRQ, and check for DMA error
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * check for drive errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		(void) nv_bm_status_clear(nvp);

		return;
	}

	bm_status = nv_bm_status_clear(nvp);

	/*
	 * check for bus master errors
	 */
	if (bm_status & BMISX_IDERR) {
		spkt->satapkt_reason = SATA_PKT_RESET;   /* ? */
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nvp->nvp_state |= NV_PORT_RESET;
		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
		nv_reset(nvp);

		return;
	}

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
}
5095 
5096 
5097 /*
5098  * Wait for a register of a controller to achieve a specific state.
5099  * To return normally, all the bits in the first sub-mask must be ON,
5100  * all the bits in the second sub-mask must be OFF.
5101  * If timeout_usec microseconds pass without the controller achieving
5102  * the desired bit configuration, return TRUE, else FALSE.
5103  *
5104  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5105  * occur for the first 250 us, then switch over to a sleeping wait.
5106  *
5107  */
5108 int
5109 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5110     int type_wait)
5111 {
5112 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5113 	hrtime_t end, cur, start_sleep, start;
5114 	int first_time = B_TRUE;
5115 	ushort_t val;
5116 
5117 	for (;;) {
5118 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5119 
5120 		if ((val & onbits) == onbits && (val & offbits) == 0) {
5121 
5122 			return (B_TRUE);
5123 		}
5124 
5125 		cur = gethrtime();
5126 
5127 		/*
5128 		 * store the start time and calculate the end
5129 		 * time.  also calculate "start_sleep" which is
5130 		 * the point after which the driver will stop busy
5131 		 * waiting and change to sleep waiting.
5132 		 */
5133 		if (first_time) {
5134 			first_time = B_FALSE;
5135 			/*
5136 			 * start and end are in nanoseconds
5137 			 */
5138 			start = cur;
5139 			end = start + timeout_usec * 1000;
5140 			/*
5141 			 * add 1 ms to start
5142 			 */
5143 			start_sleep =  start + 250000;
5144 
5145 			if (servicing_interrupt()) {
5146 				type_wait = NV_NOSLEEP;
5147 			}
5148 		}
5149 
5150 		if (cur > end) {
5151 
5152 			break;
5153 		}
5154 
5155 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5156 #if ! defined(__lock_lint)
5157 			delay(1);
5158 #endif
5159 		} else {
5160 			drv_usecwait(nv_usec_delay);
5161 		}
5162 	}
5163 
5164 	return (B_FALSE);
5165 }
5166 
5167 
5168 /*
5169  * This is a slightly more complicated version that checks
5170  * for error conditions and bails-out rather than looping
5171  * until the timeout is exceeded.
5172  *
5173  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5174  * occur for the first 250 us, then switch over to a sleeping wait.
5175  */
5176 int
5177 nv_wait3(
5178 	nv_port_t	*nvp,
5179 	uchar_t		onbits1,
5180 	uchar_t		offbits1,
5181 	uchar_t		failure_onbits2,
5182 	uchar_t		failure_offbits2,
5183 	uchar_t		failure_onbits3,
5184 	uchar_t		failure_offbits3,
5185 	uint_t		timeout_usec,
5186 	int		type_wait)
5187 {
5188 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5189 	hrtime_t end, cur, start_sleep, start;
5190 	int first_time = B_TRUE;
5191 	ushort_t val;
5192 
5193 	for (;;) {
5194 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5195 
5196 		/*
5197 		 * check for expected condition
5198 		 */
5199 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
5200 
5201 			return (B_TRUE);
5202 		}
5203 
5204 		/*
5205 		 * check for error conditions
5206 		 */
5207 		if ((val & failure_onbits2) == failure_onbits2 &&
5208 		    (val & failure_offbits2) == 0) {
5209 
5210 			return (B_FALSE);
5211 		}
5212 
5213 		if ((val & failure_onbits3) == failure_onbits3 &&
5214 		    (val & failure_offbits3) == 0) {
5215 
5216 			return (B_FALSE);
5217 		}
5218 
5219 		/*
5220 		 * store the start time and calculate the end
5221 		 * time.  also calculate "start_sleep" which is
5222 		 * the point after which the driver will stop busy
5223 		 * waiting and change to sleep waiting.
5224 		 */
5225 		if (first_time) {
5226 			first_time = B_FALSE;
5227 			/*
5228 			 * start and end are in nanoseconds
5229 			 */
5230 			cur = start = gethrtime();
5231 			end = start + timeout_usec * 1000;
5232 			/*
5233 			 * add 1 ms to start
5234 			 */
5235 			start_sleep =  start + 250000;
5236 
5237 			if (servicing_interrupt()) {
5238 				type_wait = NV_NOSLEEP;
5239 			}
5240 		} else {
5241 			cur = gethrtime();
5242 		}
5243 
5244 		if (cur > end) {
5245 
5246 			break;
5247 		}
5248 
5249 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5250 #if ! defined(__lock_lint)
5251 			delay(1);
5252 #endif
5253 		} else {
5254 			drv_usecwait(nv_usec_delay);
5255 		}
5256 	}
5257 
5258 	return (B_FALSE);
5259 }
5260 
5261 
5262 /*
5263  * nv_port_state_change() reports the state of the port to the
5264  * sata module by calling sata_hba_event_notify().  This
5265  * function is called any time the state of the port is changed
5266  */
5267 static void
5268 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5269 {
5270 	sata_device_t sd;
5271 
5272 	NVLOG((NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5273 	    "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5274 	    "time %ld (ticks)", event, addr_type, state, ddi_get_lbolt()));
5275 
5276 	bzero((void *)&sd, sizeof (sata_device_t));
5277 	sd.satadev_rev = SATA_DEVICE_REV;
5278 	nv_copy_registers(nvp, &sd, NULL);
5279 
5280 	/*
5281 	 * When NCQ is implemented sactive and snotific field need to be
5282 	 * updated.
5283 	 */
5284 	sd.satadev_addr.cport = nvp->nvp_port_num;
5285 	sd.satadev_addr.qual = addr_type;
5286 	sd.satadev_state = state;
5287 
5288 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5289 }
5290 
5291 
5292 
5293 /*
5294  * Monitor reset progress and signature gathering.
5295  * This function may loop, so it should not be called from interrupt
5296  * context.
5297  *
5298  * Entered with nvp mutex held.
5299  */
static void
nv_monitor_reset(nv_port_t *nvp)
{
	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
	uint32_t sstatus;
	int send_notification = B_FALSE;
	uint8_t dev_type;

	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);

	/*
	 * We do not know here the reason for port reset.
	 * Check the link status. The link needs to be active before
	 * we can check the link's status.
	 */
	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
		/*
		 * Either link is not active or there is no device
		 * If the link remains down for more than NV_LINK_DOWN_TIMEOUT
		 * (milliseconds), abort signature acquisition and complete
		 * reset processing.
		 * The link will go down when COMRESET is sent by nv_reset(),
		 * so it is practically nvp_reset_time milliseconds.
		 */

		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
		    NV_LINK_DOWN_TIMEOUT) {
			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
			    "nv_monitor_reset: no link - ending signature "
			    "acquisition; time after reset %ldms",
			    TICK_TO_MSEC(ddi_get_lbolt() -
			    nvp->nvp_reset_time)));
		}
		/*
		 * NOTE(review): the reset/probe state bits below are
		 * cleared unconditionally, even before NV_LINK_DOWN_TIMEOUT
		 * has elapsed; the comment above suggests the clearing was
		 * meant to be gated by that timeout check - confirm
		 * intended behavior.
		 */
		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
		    NV_PORT_PROBE | NV_PORT_HOTPLUG_DELAY);
		/*
		 * Else, if the link was lost (i.e. was present before)
		 * the controller should generate a 'remove' interrupt
		 * that will cause the appropriate event notification.
		 */
		return;
	}

	NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
	    "nv_monitor_reset: link up after reset; time %ldms",
	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time)));

sig_read:
	if (nvp->nvp_signature != 0) {
		/*
		 * The link is up. The signature was acquired before (device
		 * was present).
		 * But we may need to wait for the signature (D2H FIS) before
		 * accessing the drive.
		 */
		if (nv_wait_for_signature != 0) {
			uint32_t old_signature;
			uint8_t old_type;

			/*
			 * Save the previously known signature/type so they
			 * can be restored if re-reading yields nothing yet.
			 */
			old_signature = nvp->nvp_signature;
			old_type = nvp->nvp_type;
			nvp->nvp_signature = 0;
			nv_read_signature(nvp);
			if (nvp->nvp_signature == 0) {
				nvp->nvp_signature = old_signature;
				nvp->nvp_type = old_type;

#ifdef NV_DEBUG
				/* FOR DEBUGGING */
				if (nv_wait_here_forever) {
					drv_usecwait(1000);
					goto sig_read;
				}
#endif
				/*
				 * Wait, but not endlessly.
				 */
				if (TICK_TO_MSEC(ddi_get_lbolt() -
				    nvp->nvp_reset_time) <
				    nv_sig_acquisition_time) {
					drv_usecwait(1000);
					goto sig_read;
				} else if (!(nvp->nvp_state &
				    NV_PORT_RESET_RETRY)) {
					/*
					 * Retry reset.
					 */
					NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
					    "nv_monitor_reset: retrying reset "
					    "time after first reset: %ldms",
					    TICK_TO_MSEC(ddi_get_lbolt() -
					    nvp->nvp_reset_time)));
					nvp->nvp_state |= NV_PORT_RESET_RETRY;
					nv_reset(nvp);
					goto sig_read;
				}

				NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
				    "nv_monitor_reset: terminating signature "
				    "acquisition (1); time after reset: %ldms",
				    TICK_TO_MSEC(ddi_get_lbolt() -
				    nvp->nvp_reset_time)));
			} else {
				NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
				    "nv_monitor_reset: signature acquired; "
				    "time after reset: %ldms",
				    TICK_TO_MSEC(ddi_get_lbolt() -
				    nvp->nvp_reset_time)));
			}
		}
		/*
		 * Clear reset state, set device reset recovery state
		 */
		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
		    NV_PORT_PROBE);
		nvp->nvp_state |= NV_PORT_RESTORE;

		/*
		 * Need to send reset event notification
		 */
		send_notification = B_TRUE;
	} else {
		/*
		 * The link is up. The signature was not acquired before.
		 * We can try to fetch a device signature.
		 */
		dev_type = nvp->nvp_type;

acquire_signature:
		nv_read_signature(nvp);
		if (nvp->nvp_signature != 0) {
			/*
			 * Got device signature.
			 */
			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
			    "nv_monitor_reset: signature acquired; "
			    "time after reset: %ldms",
			    TICK_TO_MSEC(ddi_get_lbolt() -
			    nvp->nvp_reset_time)));

			/* Clear internal reset state */
			nvp->nvp_state &=
			    ~(NV_PORT_RESET | NV_PORT_RESET_RETRY);

			if (dev_type != SATA_DTYPE_NONE) {
				/*
				 * We acquired the signature for a
				 * pre-existing device that was not identified
				 * before and was reset.
				 * Need to enter the device reset recovery
				 * state and to send the reset notification.
				 */
				nvp->nvp_state |= NV_PORT_RESTORE;
				send_notification = B_TRUE;
			} else {
				/*
				 * Else, We acquired the signature because a new
				 * device was attached (the driver attach or
				 * a hot-plugged device). There is no need to
				 * enter the device reset recovery state or to
				 * send the reset notification, but we may need
				 * to send a device attached notification.
				 */
				if (nvp->nvp_state & NV_PORT_PROBE) {
					nv_port_state_change(nvp,
					    SATA_EVNT_DEVICE_ATTACHED,
					    SATA_ADDR_CPORT, 0);
					nvp->nvp_state &= ~NV_PORT_PROBE;
				}
			}
		} else {
			if (TICK_TO_MSEC(ddi_get_lbolt() -
			    nvp->nvp_reset_time) < nv_sig_acquisition_time) {
				drv_usecwait(1000);
				goto acquire_signature;
			} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
				/*
				 * Some drives may require additional
				 * reset(s) to get a valid signature
				 * (indicating that the drive is ready).
				 * If a drive was not just powered
				 * up, the signature should be available
				 * within few hundred milliseconds
				 * after reset.  Therefore, if more than
				 * NV_SIG_ACQUISITION_TIME has elapsed
				 * while waiting for a signature, reset
				 * device again.
				 */
				NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
				    "nv_monitor_reset: retrying reset "
				    "time after first reset: %ldms",
				    TICK_TO_MSEC(ddi_get_lbolt() -
				    nvp->nvp_reset_time)));
				nvp->nvp_state |= NV_PORT_RESET_RETRY;
				nv_reset(nvp);
				drv_usecwait(1000);
				goto acquire_signature;
			}
			/*
			 * Terminating signature acquisition.
			 * Hopefully, the drive is ready.
			 * The SATA module can deal with this as long as it
			 * knows that some device is attached and a device
			 * responds to commands.
			 */
			if (!(nvp->nvp_state & NV_PORT_PROBE)) {
				send_notification = B_TRUE;
			}
			nvp->nvp_state &= ~(NV_PORT_RESET |
			    NV_PORT_RESET_RETRY);
			nvp->nvp_type = SATA_DTYPE_UNKNOWN;
			if (nvp->nvp_state & NV_PORT_PROBE) {
				nv_port_state_change(nvp,
				    SATA_EVNT_DEVICE_ATTACHED,
				    SATA_ADDR_CPORT, 0);
				nvp->nvp_state &= ~NV_PORT_PROBE;
			}
			/*
			 * NOTE(review): nvp_type was set to
			 * SATA_DTYPE_UNKNOWN just above and is immediately
			 * overwritten here with the pre-reset dev_type -
			 * confirm which value is intended to persist.
			 */
			nvp->nvp_type = dev_type;
			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
			    "nv_monitor_reset: terminating signature "
			    "acquisition (2); time after reset: %ldms",
			    TICK_TO_MSEC(ddi_get_lbolt() -
			    nvp->nvp_reset_time)));
		}
	}

	if (send_notification) {
		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
		    SATA_ADDR_DCPORT,
		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);
	}

#ifdef SGPIO_SUPPORT
	/* notify the SGPIO code of drive connect/disconnect */
	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
	} else {
		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
	}
#endif
}
5543 
5544 
5545 /*
5546  * Send a hotplug (add device) notification at the appropriate time after
5547  * hotplug detection.
5548  * Relies on nvp_reset_time set at a hotplug detection time.
5549  * Called only from nv_timeout when NV_PORT_HOTPLUG_DELAY flag is set in
5550  * the nvp_state.
5551  */
5552 static void
5553 nv_delay_hotplug_notification(nv_port_t *nvp)
5554 {
5555 
5556 	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5557 	    nv_hotplug_delay) {
5558 		NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5559 		    "nv_delay_hotplug_notification: notifying framework after "
5560 		    "%dms delay", TICK_TO_MSEC(ddi_get_lbolt() -
5561 		    nvp->nvp_reset_time)));
5562 		nvp->nvp_state &= ~NV_PORT_HOTPLUG_DELAY;
5563 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5564 		    SATA_ADDR_CPORT, 0);
5565 	}
5566 }
5567 
5568 /*
5569  * timeout processing:
5570  *
5571  * Check if any packets have crossed a timeout threshold.  If so,
5572  * abort the packet.  This function is not NCQ-aware.
5573  *
5574  * If reset was invoked, call reset monitoring function.
5575  *
5576  * Timeout frequency may be lower for checking packet timeout (1s)
5577  * and higher for reset monitoring (1ms)
5578  *
5579  */
static void
nv_timeout(void *arg)
{
	nv_port_t *nvp = arg;
	nv_slot_t *nv_slotp;
	int next_timeout = NV_ONE_SEC;	/* Default */
	uint16_t int_status;
	uint8_t status, bmstatus;
	static int intr_warn_once = 0;

	ASSERT(nvp != NULL);

	mutex_enter(&nvp->nvp_mutex);
	/* this callout has fired; it is re-armed at "finished" if needed */
	nvp->nvp_timeout_id = 0;

	/*
	 * If the port is not in the init state, ignore it.
	 */
	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
		    "nv_timeout: port uninitialized"));
		next_timeout = 0;

		goto finished;
	}

	/* reset in progress: poll its state at the faster 1ms cadence */
	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
		nv_monitor_reset(nvp);
		next_timeout = NV_ONE_MSEC;	/* at least 1ms */

		goto finished;
	}

	/* pending delayed hotplug notification: check it at 1ms cadence */
	if ((nvp->nvp_state & NV_PORT_HOTPLUG_DELAY) != 0) {
		nv_delay_hotplug_notification(nvp);
		next_timeout = NV_ONE_MSEC;	/* at least 1ms */

		goto finished;
	}

	/*
	 * Not yet NCQ-aware - there is only one command active.
	 */
	nv_slotp = &(nvp->nvp_slot[0]);

	/*
	 * perform timeout checking and processing only if there is an
	 * active packet on the port
	 * NOTE(review): nv_slotp is the address of an array element and
	 * can never be NULL; the first half of the test is redundant.
	 */
	if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL)  {
		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
		uint8_t cmd = satacmd->satacmd_cmd_reg;
		uint64_t lba;

#if ! defined(__lock_lint) && defined(DEBUG)

		/*
		 * lba is assembled for the debug log below only.
		 * NOTE(review): it is a uint64_t but is logged with %d -
		 * format/argument mismatch; confirm and fix the NVLOG
		 * format if this path is relied on.
		 */
		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
#endif

		/*
		 * timeout not needed if there is a polling thread
		 */
		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
			next_timeout = 0;

			goto finished;
		}

		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
		    spkt->satapkt_time) {

			uint32_t serr = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
			    nvp->nvp_serror);

			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
			    "nv_timeout: aborting: "
			    "nvslot_stime: %ld max ticks till timeout: "
			    "%ld cur_time: %ld cmd=%x lba=%d",
			    nv_slotp->nvslot_stime,
			    drv_usectohz(MICROSEC *
			    spkt->satapkt_time), ddi_get_lbolt(),
			    cmd, lba));

			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
			    "nv_timeout: SError at timeout: 0x%x", serr));

			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
			    "nv_timeout: previous cmd=%x",
			    nvp->nvp_previous_cmd));

			/*
			 * On mcp5x hardware, check whether the completion
			 * interrupt is actually pending before declaring a
			 * true timeout (the interrupt may have been missed).
			 */
			if (nvp->nvp_mcp5x_int_status != NULL) {
				status = nv_get8(nvp->nvp_ctl_hdl,
				    nvp->nvp_altstatus);
				bmstatus = nv_get8(nvp->nvp_bm_hdl,
				    nvp->nvp_bmisx);
				int_status = nv_get16(
				    nvp->nvp_ctlp->nvc_bar_hdl[5],
				    nvp->nvp_mcp5x_int_status);
				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
				    "nv_timeout: altstatus %x, bmicx %x, "
				    "int_status %X", status, bmstatus,
				    int_status));

				if (int_status & MCP5X_INT_COMPLETE) {
					/*
					 * Completion interrupt was missed!
					 * Issue warning message once
					 */
					if (!intr_warn_once) {
						cmn_err(CE_WARN,
						    "nv_sata: missing command "
						    "completion interrupt(s)!");
						intr_warn_once = 1;
					}
					NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp,
					    nvp, "timeout detected with "
					    "interrupt ready - calling "
					    "int directly"));
					/*
					 * Drop the port mutex before running
					 * the interrupt handler directly;
					 * presumably mcp5x_intr_port()
					 * acquires it itself - confirm.
					 */
					mutex_exit(&nvp->nvp_mutex);
					(void) mcp5x_intr_port(nvp);
					mutex_enter(&nvp->nvp_mutex);
				} else {
					/*
					 * True timeout and not a missing
					 * interrupt.
					 */
					(void) nv_abort_active(nvp, spkt,
					    SATA_PKT_TIMEOUT, B_TRUE);
				}
			} else {
				(void) nv_abort_active(nvp, spkt,
				    SATA_PKT_TIMEOUT, B_TRUE);
			}

		} else {
#ifdef NV_DEBUG
			if (nv_debug_flags & NVDBG_VERBOSE) {
				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
				    "nv_timeout:"
				    " still in use so restarting timeout"));
			}
#endif
			/* packet has not timed out yet - check again in 1s */
			next_timeout = NV_ONE_SEC;
		}
	} else {
		/*
		 * there was no active packet, so do not re-enable timeout
		 */
		next_timeout = 0;
#ifdef NV_DEBUG
		if (nv_debug_flags & NVDBG_VERBOSE) {
			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
			    "nv_timeout: no active packet so not re-arming "
			    "timeout"));
		}
#endif
	}

finished:
	/* re-arm the callout unless explicitly disabled above */
	if (next_timeout != 0) {
		nv_setup_timeout(nvp, next_timeout);
	}
	mutex_exit(&nvp->nvp_mutex);
}
5750 
5751 
5752 /*
5753  * enable or disable the 3 interrupt types the driver is
5754  * interested in: completion, add and remove.
5755  */
static void
ck804_set_intr(nv_port_t *nvp, int flag)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uchar_t *bar5  = nvc->nvc_bar_addr[5];
	/* per-port (primary/secondary) interrupt enable bits */
	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
	uint8_t int_en, port = nvp->nvp_port_num, intr_status;

	/*
	 * Non-blocking variant: mask this port's interrupts without
	 * taking any mutex (no lock assertion, no controller lock).
	 */
	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en &= ~intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
		return;
	}

	ASSERT(mutex_owned(&nvp->nvp_mutex));

	/*
	 * controller level lock also required since access to an 8-bit
	 * interrupt register is shared between both channels.
	 */
	mutex_enter(&nvc->nvc_mutex);

	if (flag & NV_INTR_CLEAR_ALL) {
		NVLOG((NVDBG_INTR, nvc, nvp,
		    "ck804_set_intr: NV_INTR_CLEAR_ALL"));

		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
		    (uint8_t *)(nvc->nvc_ck804_int_status));

		/*
		 * Write back this port's bits only when something is
		 * pending; the write appears to acknowledge (clear) the
		 * pending interrupt bits for this port.
		 */
		if (intr_status & clear_all_bits[port]) {

			nv_put8(nvc->nvc_bar_hdl[5],
			    (uint8_t *)(nvc->nvc_ck804_int_status),
			    clear_all_bits[port]);

			NVLOG((NVDBG_INTR, nvc, nvp,
			    "interrupt bits cleared %x",
			    intr_status & clear_all_bits[port]));
		}
	}

	if (flag & NV_INTR_DISABLE) {
		NVLOG((NVDBG_INTR, nvc, nvp,
		    "ck804_set_intr: NV_INTR_DISABLE"));
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en &= ~intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
	}

	if (flag & NV_INTR_ENABLE) {
		NVLOG((NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE"));
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en |= intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
	}

	mutex_exit(&nvc->nvc_mutex);
}
5824 
5825 
5826 /*
5827  * enable or disable the 3 interrupts the driver is interested in:
5828  * completion interrupt, hot add, and hot remove interrupt.
5829  */
5830 static void
5831 mcp5x_set_intr(nv_port_t *nvp, int flag)
5832 {
5833 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5834 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5835 	uint16_t intr_bits =
5836 	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5837 	uint16_t int_en;
5838 
5839 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5840 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5841 		int_en &= ~intr_bits;
5842 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5843 		return;
5844 	}
5845 
5846 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5847 
5848 	NVLOG((NVDBG_HOT, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag));
5849 
5850 	if (flag & NV_INTR_CLEAR_ALL) {
5851 		NVLOG((NVDBG_INTR, nvc, nvp,
5852 		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL"));
5853 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
5854 	}
5855 
5856 	if (flag & NV_INTR_ENABLE) {
5857 		NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE"));
5858 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5859 		int_en |= intr_bits;
5860 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5861 	}
5862 
5863 	if (flag & NV_INTR_DISABLE) {
5864 		NVLOG((NVDBG_INTR, nvc, nvp,
5865 		    "mcp5x_set_intr: NV_INTR_DISABLE"));
5866 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5867 		int_en &= ~intr_bits;
5868 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5869 	}
5870 }
5871 
5872 
5873 static void
5874 nv_resume(nv_port_t *nvp)
5875 {
5876 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
5877 
5878 	mutex_enter(&nvp->nvp_mutex);
5879 
5880 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5881 		mutex_exit(&nvp->nvp_mutex);
5882 
5883 		return;
5884 	}
5885 
5886 	/* Enable interrupt */
5887 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5888 
5889 	/*
5890 	 * Power may have been removed to the port and the
5891 	 * drive, and/or a drive may have been added or removed.
5892 	 * Force a reset which will cause a probe and re-establish
5893 	 * any state needed on the drive.
5894 	 */
5895 	nvp->nvp_state |= NV_PORT_RESET;
5896 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5897 	nv_reset(nvp);
5898 
5899 	mutex_exit(&nvp->nvp_mutex);
5900 }
5901 
5902 
5903 static void
5904 nv_suspend(nv_port_t *nvp)
5905 {
5906 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
5907 
5908 	mutex_enter(&nvp->nvp_mutex);
5909 
5910 #ifdef SGPIO_SUPPORT
5911 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5912 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5913 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5914 	}
5915 #endif
5916 
5917 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5918 		mutex_exit(&nvp->nvp_mutex);
5919 
5920 		return;
5921 	}
5922 
5923 	/*
5924 	 * Stop the timeout handler.
5925 	 * (It will be restarted in nv_reset() during nv_resume().)
5926 	 */
5927 	if (nvp->nvp_timeout_id) {
5928 		(void) untimeout(nvp->nvp_timeout_id);
5929 		nvp->nvp_timeout_id = 0;
5930 	}
5931 
5932 	/* Disable interrupt */
5933 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
5934 	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
5935 
5936 	mutex_exit(&nvp->nvp_mutex);
5937 }
5938 
5939 
5940 static void
5941 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5942 {
5943 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5944 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5945 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5946 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5947 	uchar_t status;
5948 	struct sata_cmd_flags flags;
5949 
5950 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5951 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5952 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5953 
5954 	if (spkt == NULL) {
5955 
5956 		return;
5957 	}
5958 
5959 	/*
5960 	 * in the error case, implicitly set the return of regs needed
5961 	 * for error handling.
5962 	 */
5963 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5964 	    nvp->nvp_altstatus);
5965 
5966 	flags = scmd->satacmd_flags;
5967 
5968 	if (status & SATA_STATUS_ERR) {
5969 		flags.sata_copy_out_lba_low_msb = B_TRUE;
5970 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5971 		flags.sata_copy_out_lba_high_msb = B_TRUE;
5972 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5973 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5974 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5975 		flags.sata_copy_out_error_reg = B_TRUE;
5976 		flags.sata_copy_out_sec_count_msb = B_TRUE;
5977 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5978 		scmd->satacmd_status_reg = status;
5979 	}
5980 
5981 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5982 
5983 		/*
5984 		 * set HOB so that high byte will be read
5985 		 */
5986 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5987 
5988 		/*
5989 		 * get the requested high bytes
5990 		 */
5991 		if (flags.sata_copy_out_sec_count_msb) {
5992 			scmd->satacmd_sec_count_msb =
5993 			    nv_get8(cmdhdl, nvp->nvp_count);
5994 		}
5995 
5996 		if (flags.sata_copy_out_lba_low_msb) {
5997 			scmd->satacmd_lba_low_msb =
5998 			    nv_get8(cmdhdl, nvp->nvp_sect);
5999 		}
6000 
6001 		if (flags.sata_copy_out_lba_mid_msb) {
6002 			scmd->satacmd_lba_mid_msb =
6003 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
6004 		}
6005 
6006 		if (flags.sata_copy_out_lba_high_msb) {
6007 			scmd->satacmd_lba_high_msb =
6008 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
6009 		}
6010 	}
6011 
6012 	/*
6013 	 * disable HOB so that low byte is read
6014 	 */
6015 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
6016 
6017 	/*
6018 	 * get the requested low bytes
6019 	 */
6020 	if (flags.sata_copy_out_sec_count_lsb) {
6021 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
6022 	}
6023 
6024 	if (flags.sata_copy_out_lba_low_lsb) {
6025 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
6026 	}
6027 
6028 	if (flags.sata_copy_out_lba_mid_lsb) {
6029 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
6030 	}
6031 
6032 	if (flags.sata_copy_out_lba_high_lsb) {
6033 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
6034 	}
6035 
6036 	/*
6037 	 * get the device register if requested
6038 	 */
6039 	if (flags.sata_copy_out_device_reg) {
6040 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
6041 	}
6042 
6043 	/*
6044 	 * get the error register if requested
6045 	 */
6046 	if (flags.sata_copy_out_error_reg) {
6047 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6048 	}
6049 }
6050 
6051 
6052 /*
6053  * Hot plug and remove interrupts can occur when the device is reset.  Just
6054  * masking the interrupt doesn't always work well because if a
6055  * different interrupt arrives on the other port, the driver can still
6056  * end up checking the state of the other port and discover the hot
6057  * interrupt flag is set even though it was masked.  Checking for recent
6058  * reset activity and then ignoring turns out to be the easiest way.
6059  *
6060  * Entered with nvp mutex held.
6061  */
6062 static void
6063 nv_report_add_remove(nv_port_t *nvp, int flags)
6064 {
6065 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6066 	uint32_t sstatus;
6067 	int i;
6068 	clock_t nv_lbolt = ddi_get_lbolt();
6069 
6070 
6071 	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove() - "
6072 	    "time (ticks) %d", nv_lbolt));
6073 
6074 	/*
6075 	 * wait up to 1ms for sstatus to settle and reflect the true
6076 	 * status of the port.  Failure to do so can create confusion
6077 	 * in probe, where the incorrect sstatus value can still
6078 	 * persist.
6079 	 */
6080 	for (i = 0; i < 1000; i++) {
6081 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6082 
6083 		if ((flags == NV_PORT_HOTREMOVED) &&
6084 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
6085 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6086 			break;
6087 		}
6088 
6089 		if ((flags != NV_PORT_HOTREMOVED) &&
6090 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
6091 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6092 			break;
6093 		}
6094 		drv_usecwait(1);
6095 	}
6096 
6097 	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6098 	    "sstatus took %d us for DEVPRE_PHYCOM to settle", i));
6099 
6100 	if (flags == NV_PORT_HOTREMOVED) {
6101 
6102 		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
6103 		    B_FALSE);
6104 
6105 		/*
6106 		 * No device, no point of bothering with device reset
6107 		 */
6108 		nvp->nvp_type = SATA_DTYPE_NONE;
6109 		nvp->nvp_signature = 0;
6110 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
6111 		    NV_PORT_RESTORE);
6112 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6113 		    "nv_report_add_remove() hot removed"));
6114 		nv_port_state_change(nvp,
6115 		    SATA_EVNT_DEVICE_DETACHED,
6116 		    SATA_ADDR_CPORT, 0);
6117 
6118 #ifdef SGPIO_SUPPORT
6119 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
6120 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
6121 #endif
6122 	} else {
6123 		/*
6124 		 * This is a hot plug or link up indication
6125 		 * Now, re-check the link state - no link, no device
6126 		 */
6127 		if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
6128 		    (SSTATUS_GET_DET(sstatus) == SSTATUS_DET_DEVPRE_PHYCOM)) {
6129 
6130 			if (nvp->nvp_type == SATA_DTYPE_NONE) {
6131 				/*
6132 				 * Real device attach - there was no device
6133 				 * attached to this port before this report
6134 				 */
6135 				NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6136 				    "nv_report_add_remove() new device hot"
6137 				    "plugged"));
6138 				nvp->nvp_reset_time = ddi_get_lbolt();
6139 				if (!(nvp->nvp_state &
6140 				    (NV_PORT_RESET_RETRY | NV_PORT_RESET))) {
6141 
6142 					nvp->nvp_signature = 0;
6143 					if (nv_reset_after_hotplug != 0) {
6144 
6145 						/*
6146 						 * Send reset to obtain a device
6147 						 * signature
6148 						 */
6149 						nvp->nvp_state |=
6150 						    NV_PORT_RESET |
6151 						    NV_PORT_PROBE;
6152 						nv_reset(nvp);
6153 						NVLOG((NVDBG_HOT,
6154 						    nvp->nvp_ctlp, nvp,
6155 						    "nv_report_add_remove() "
6156 						    "resetting device"));
6157 					} else {
6158 						nvp->nvp_type =
6159 						    SATA_DTYPE_UNKNOWN;
6160 					}
6161 				}
6162 
6163 				if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6164 					if (nv_reset_after_hotplug == 0) {
6165 						/*
6166 						 * In case a hotplug interrupt
6167 						 * is generated right after a
6168 						 * link is up, delay reporting
6169 						 * a hotplug event to let the
6170 						 * drive to initialize and to
6171 						 * send a D2H FIS with a
6172 						 * signature.
6173 						 * The timeout will issue an
6174 						 * event notification after
6175 						 * the NV_HOTPLUG_DELAY
6176 						 * milliseconds delay.
6177 						 */
6178 						nvp->nvp_state |=
6179 						    NV_PORT_HOTPLUG_DELAY;
6180 						nvp->nvp_type =
6181 						    SATA_DTYPE_UNKNOWN;
6182 						/*
6183 						 * Make sure timer is running.
6184 						 */
6185 						nv_setup_timeout(nvp,
6186 						    NV_ONE_MSEC);
6187 					} else {
6188 						nv_port_state_change(nvp,
6189 						    SATA_EVNT_DEVICE_ATTACHED,
6190 						    SATA_ADDR_CPORT, 0);
6191 					}
6192 				}
6193 				return;
6194 			}
6195 			/*
6196 			 * Othervise it is a bogus attach, indicating recovered
6197 			 * link loss. No real need to report it after-the-fact.
6198 			 * But we may keep some statistics, or notify the
6199 			 * sata module by reporting LINK_LOST/LINK_ESTABLISHED
6200 			 * events to keep track of such occurrences.
6201 			 * Anyhow, we may want to terminate signature
6202 			 * acquisition.
6203 			 */
6204 			NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6205 			    "nv_report_add_remove() ignoring plug interrupt "
6206 			    "- recovered link?"));
6207 
6208 			if (nvp->nvp_state &
6209 			    (NV_PORT_RESET_RETRY | NV_PORT_RESET)) {
6210 				NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6211 				    "nv_report_add_remove() - "
6212 				    "time since last reset %dms",
6213 				    TICK_TO_MSEC(ddi_get_lbolt() -
6214 				    nvp->nvp_reset_time)));
6215 				/*
6216 				 * If the driver does not have to wait for
6217 				 * a signature, then terminate reset processing
6218 				 * now.
6219 				 */
6220 				if (nv_wait_for_signature == 0) {
6221 					NVLOG((NVDBG_RESET, nvp->nvp_ctlp,
6222 					    nvp, "nv_report_add_remove() - ",
6223 					    "terminating signature acquisition",
6224 					    ", time after reset: %dms",
6225 					    TICK_TO_MSEC(ddi_get_lbolt() -
6226 					    nvp->nvp_reset_time)));
6227 
6228 					nvp->nvp_state &= ~(NV_PORT_RESET |
6229 					    NV_PORT_RESET_RETRY);
6230 
6231 					if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6232 						nvp->nvp_state |=
6233 						    NV_PORT_RESTORE;
6234 						nvp->nvp_state &=
6235 						    ~NV_PORT_PROBE;
6236 
6237 						/*
6238 						 * It is not the initial device
6239 						 * probing, so notify sata
6240 						 * module that device was
6241 						 * reset
6242 						 */
6243 						nv_port_state_change(nvp,
6244 						    SATA_EVNT_DEVICE_RESET,
6245 						    SATA_ADDR_DCPORT,
6246 						    SATA_DSTATE_RESET |
6247 						    SATA_DSTATE_PWR_ACTIVE);
6248 					}
6249 
6250 				}
6251 			}
6252 			return;
6253 		}
6254 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
6255 		    "ignoring add dev interrupt - "
6256 		    "link is down or no device!"));
6257 	}
6258 
6259 }
6260 
6261 /*
6262  * Get request sense data and stuff it the command's sense buffer.
6263  * Start a request sense command in order to get sense data to insert
6264  * in the sata packet's rqsense buffer.  The command completion
6265  * processing is in nv_intr_pkt_pio.
6266  *
6267  * The sata framework provides a function to allocate and set-up a
6268  * request sense packet command. The reasons it is not being used here is:
6269  * a) it cannot be called in an interrupt context and this function is
6270  *    called in an interrupt context.
6271  * b) it allocates DMA resources that are not used here because this is
6272  *    implemented using PIO.
6273  *
6274  * If, in the future, this is changed to use DMA, the sata framework should
6275  * be used to allocate and set-up the error retrieval (request sense)
6276  * command.
6277  */
6278 static int
6279 nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
6280 {
6281 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
6282 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
6283 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6284 	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
6285 
6286 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6287 	    "nv_start_rqsense_pio: start"));
6288 
6289 	/* clear the local request sense buffer before starting the command */
6290 	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
6291 
6292 	/* Write the request sense PACKET command */
6293 
6294 	/* select the drive */
6295 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
6296 
6297 	/* make certain the drive selected */
6298 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
6299 	    NV_SEC2USEC(5), 0) == B_FALSE) {
6300 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6301 		    "nv_start_rqsense_pio: drive select failed"));
6302 		return (NV_FAILURE);
6303 	}
6304 
6305 	/* set up the command */
6306 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
6307 	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
6308 	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
6309 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
6310 	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
6311 
6312 	/* initiate the command by writing the command register last */
6313 	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
6314 
6315 	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
6316 	NV_DELAY_NSEC(400);
6317 
6318 	/*
6319 	 * Wait for the device to indicate that it is ready for the command
6320 	 * ATAPI protocol state - HP0: Check_Status_A
6321 	 */
6322 
6323 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
6324 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
6325 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
6326 	    4000000, 0) == B_FALSE) {
6327 		if (nv_get8(cmdhdl, nvp->nvp_status) &
6328 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
6329 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6330 			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
6331 		} else {
6332 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6333 			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
6334 		}
6335 
6336 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
6337 		nv_complete_io(nvp, spkt, 0);
6338 		nvp->nvp_state |= NV_PORT_RESET;
6339 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
6340 		nv_reset(nvp);
6341 
6342 		return (NV_FAILURE);
6343 	}
6344 
6345 	/*
6346 	 * Put the ATAPI command in the data register
6347 	 * ATAPI protocol state - HP1: Send_Packet
6348 	 */
6349 
6350 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
6351 	    (ushort_t *)nvp->nvp_data,
6352 	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
6353 
6354 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6355 	    "nv_start_rqsense_pio: exiting into HP3"));
6356 
6357 	return (NV_SUCCESS);
6358 }
6359 
6360 /*
6361  * quiesce(9E) entry point.
6362  *
6363  * This function is called when the system is single-threaded at high
6364  * PIL with preemption disabled. Therefore, this function must not be
6365  * blocked.
6366  *
6367  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6368  * DDI_FAILURE indicates an error condition and should almost never happen.
6369  */
6370 static int
6371 nv_quiesce(dev_info_t *dip)
6372 {
6373 	int port, instance = ddi_get_instance(dip);
6374 	nv_ctl_t *nvc;
6375 
6376 	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
6377 		return (DDI_FAILURE);
6378 
6379 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
6380 		nv_port_t *nvp = &(nvc->nvc_port[port]);
6381 		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6382 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6383 		uint32_t sctrl;
6384 
6385 		/*
6386 		 * Stop the controllers from generating interrupts.
6387 		 */
6388 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
6389 
6390 		/*
6391 		 * clear signature registers
6392 		 */
6393 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
6394 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
6395 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
6396 		nv_put8(cmdhdl, nvp->nvp_count, 0);
6397 
6398 		nvp->nvp_signature = 0;
6399 		nvp->nvp_type = 0;
6400 		nvp->nvp_state |= NV_PORT_RESET;
6401 		nvp->nvp_reset_time = ddi_get_lbolt();
6402 
6403 		/*
6404 		 * assert reset in PHY by writing a 1 to bit 0 scontrol
6405 		 */
6406 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6407 
6408 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
6409 		    sctrl | SCONTROL_DET_COMRESET);
6410 
6411 		/*
6412 		 * wait 1ms
6413 		 */
6414 		drv_usecwait(1000);
6415 
6416 		/*
6417 		 * de-assert reset in PHY
6418 		 */
6419 		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
6420 	}
6421 
6422 	return (DDI_SUCCESS);
6423 }
6424 
6425 
6426 #ifdef SGPIO_SUPPORT
6427 /*
6428  * NVIDIA specific SGPIO LED support
6429  * Please refer to the NVIDIA documentation for additional details
6430  */
6431 
6432 /*
6433  * nv_sgp_led_init
6434  * Detect SGPIO support.  If present, initialize.
6435  */
6436 static void
6437 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6438 {
6439 	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
6440 	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
6441 	nv_sgp_cmn_t *cmn;	/* shared data structure */
6442 	int i;
6443 	char tqname[SGPIO_TQ_NAME_LEN];
6444 	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6445 
6446 	/*
6447 	 * Initialize with appropriately invalid values in case this function
6448 	 * exits without initializing SGPIO (for example, there is no SGPIO
6449 	 * support).
6450 	 */
6451 	nvc->nvc_sgp_csr = 0;
6452 	nvc->nvc_sgp_cbp = NULL;
6453 	nvc->nvc_sgp_cmn = NULL;
6454 
6455 	/*
6456 	 * Only try to initialize SGPIO LED support if this property
6457 	 * indicates it should be.
6458 	 */
6459 	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6460 	    "enable-sgpio-leds", 0) != 1)
6461 		return;
6462 
6463 	/*
6464 	 * CK804 can pass the sgpio_detect test even though it does not support
6465 	 * SGPIO, so don't even look at a CK804.
6466 	 */
6467 	if (nvc->nvc_mcp5x_flag != B_TRUE)
6468 		return;
6469 
6470 	/*
6471 	 * The NVIDIA SGPIO support can nominally handle 6 drives.
6472 	 * However, the current implementation only supports 4 drives.
6473 	 * With two drives per controller, that means only look at the
6474 	 * first two controllers.
6475 	 */
6476 	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6477 		return;
6478 
6479 	/* confirm that the SGPIO registers are there */
6480 	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6481 		NVLOG((NVDBG_INIT, nvc, NULL,
6482 		    "SGPIO registers not detected"));
6483 		return;
6484 	}
6485 
6486 	/* save off the SGPIO_CSR I/O address */
6487 	nvc->nvc_sgp_csr = csrp;
6488 
6489 	/* map in Control Block */
6490 	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6491 	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
6492 
6493 	/* initialize the SGPIO h/w */
6494 	if (nv_sgp_init(nvc) == NV_FAILURE) {
6495 		nv_cmn_err(CE_WARN, nvc, NULL,
6496 		    "!Unable to initialize SGPIO");
6497 	}
6498 
6499 	/*
6500 	 * Initialize the shared space for this instance.  This could
6501 	 * involve allocating the space, saving a pointer to the space
6502 	 * and starting the taskq that actually turns the LEDs on and off.
6503 	 * Or, it could involve just getting the pointer to the already
6504 	 * allocated space.
6505 	 */
6506 
6507 	mutex_enter(&nv_sgp_c2c_mutex);
6508 
6509 	/* try and find our CBP in the mapping table */
6510 	cmn = NULL;
6511 	for (i = 0; i < NV_MAX_CBPS; i++) {
6512 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
6513 			cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
6514 			break;
6515 		}
6516 
6517 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6518 			break;
6519 	}
6520 
6521 	if (i >= NV_MAX_CBPS) {
6522 		/*
6523 		 * CBP to shared space mapping table is full
6524 		 */
6525 		nvc->nvc_sgp_cmn = NULL;
6526 		nv_cmn_err(CE_WARN, nvc, NULL,
6527 		    "!LED handling not initialized - too many controllers");
6528 	} else if (cmn == NULL) {
6529 		/*
6530 		 * Allocate the shared space, point the SGPIO scratch register
6531 		 * at it and start the led update taskq.
6532 		 */
6533 
6534 		/* allocate shared space */
6535 		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
6536 		    KM_SLEEP);
6537 		if (cmn == NULL) {
6538 			nv_cmn_err(CE_WARN, nvc, NULL,
6539 			    "!Failed to allocate shared data");
6540 			return;
6541 		}
6542 
6543 		nvc->nvc_sgp_cmn = cmn;
6544 
6545 		/* initialize the shared data structure */
6546 		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
6547 		cmn->nvs_connected = 0;
6548 		cmn->nvs_activity = 0;
6549 		cmn->nvs_cbp = cbp;
6550 
6551 		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
6552 		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
6553 		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
6554 
6555 		/* put the address in the SGPIO scratch register */
6556 #if defined(__amd64)
6557 		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
6558 #else
6559 		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
6560 #endif
6561 
6562 		/* add an entry to the cbp to cmn mapping table */
6563 
6564 		/* i should be the next available table position */
6565 		nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
6566 		nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
6567 
6568 		/* start the activity LED taskq */
6569 
6570 		/*
6571 		 * The taskq name should be unique and the time
6572 		 */
6573 		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
6574 		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
6575 		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
6576 		    TASKQ_DEFAULTPRI, 0);
6577 		if (cmn->nvs_taskq == NULL) {
6578 			cmn->nvs_taskq_delay = 0;
6579 			nv_cmn_err(CE_WARN, nvc, NULL,
6580 			    "!Failed to start activity LED taskq");
6581 		} else {
6582 			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
6583 			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
6584 			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
6585 		}
6586 	} else {
6587 		nvc->nvc_sgp_cmn = cmn;
6588 		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6589 	}
6590 
6591 	mutex_exit(&nv_sgp_c2c_mutex);
6592 }
6593 
6594 /*
6595  * nv_sgp_detect
6596  * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
6597  * report back whether both were readable.
6598  */
6599 static int
6600 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
6601     uint32_t *cbpp)
6602 {
6603 	/* get the SGPIO_CSRP */
6604 	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
6605 	if (*csrpp == 0) {
6606 		return (NV_FAILURE);
6607 	}
6608 
6609 	/* SGPIO_CSRP is good, get the SGPIO_CBP */
6610 	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
6611 	if (*cbpp == 0) {
6612 		return (NV_FAILURE);
6613 	}
6614 
6615 	/* SGPIO_CBP is good, so we must support SGPIO */
6616 	return (NV_SUCCESS);
6617 }
6618 
6619 /*
6620  * nv_sgp_init
6621  * Initialize SGPIO.
6622  * The initialization process is described by NVIDIA, but the hardware does
6623  * not always behave as documented, so several steps have been changed and/or
6624  * omitted.
6625  */
6626 static int
6627 nv_sgp_init(nv_ctl_t *nvc)
6628 {
6629 	int seq;
6630 	int rval = NV_SUCCESS;
6631 	hrtime_t start, end;
6632 	uint32_t cmd;
6633 	uint32_t status;
6634 	int drive_count;
6635 
6636 	status = nv_sgp_csr_read(nvc);
6637 	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
6638 		/* SGPIO logic is in reset state and requires initialization */
6639 
6640 		/* noting the Sequence field value */
6641 		seq = SGPIO_CSR_SEQ(status);
6642 
6643 		/* issue SGPIO_CMD_READ_PARAMS command */
6644 		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
6645 		nv_sgp_csr_write(nvc, cmd);
6646 
6647 		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
6648 
6649 		/* poll for command completion */
6650 		start = gethrtime();
6651 		end = start + NV_SGP_CMD_TIMEOUT;
6652 		for (;;) {
6653 			status = nv_sgp_csr_read(nvc);
6654 
6655 			/* break on error */
6656 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
6657 				NVLOG((NVDBG_ALWAYS, nvc, NULL,
6658 				    "Command error during initialization"));
6659 				rval = NV_FAILURE;
6660 				break;
6661 			}
6662 
6663 			/* command processing is taking place */
6664 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
6665 				if (SGPIO_CSR_SEQ(status) != seq) {
6666 					NVLOG((NVDBG_ALWAYS, nvc, NULL,
6667 					    "Sequence number change error"));
6668 				}
6669 
6670 				break;
6671 			}
6672 
6673 			/* if completion not detected in 2000ms ... */
6674 
6675 			if (gethrtime() > end)
6676 				break;
6677 
6678 			/* wait 400 ns before checking again */
6679 			NV_DELAY_NSEC(400);
6680 		}
6681 	}
6682 
6683 	if (rval == NV_FAILURE)
6684 		return (rval);
6685 
6686 	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
6687 		NVLOG((NVDBG_ALWAYS, nvc, NULL,
6688 		    "SGPIO logic not operational after init - state %d",
6689 		    SGPIO_CSR_SSTAT(status)));
6690 		/*
6691 		 * Should return (NV_FAILURE) but the hardware can be
6692 		 * operational even if the SGPIO Status does not indicate
6693 		 * this.
6694 		 */
6695 	}
6696 
6697 	/*
6698 	 * NVIDIA recommends reading the supported drive count even
6699 	 * though they also indicate that it is always 4 at this time.
6700 	 */
6701 	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
6702 	if (drive_count != SGPIO_DRV_CNT_VALUE) {
6703 		NVLOG((NVDBG_INIT, nvc, NULL,
6704 		    "SGPIO reported undocumented drive count - %d",
6705 		    drive_count));
6706 	}
6707 
6708 	NVLOG((NVDBG_INIT, nvc, NULL,
6709 	    "initialized ctlr: %d csr: 0x%08x",
6710 	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr));
6711 
6712 	return (rval);
6713 }
6714 
6715 static int
6716 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6717 {
6718 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6719 
6720 	if (cmn == NULL)
6721 		return (NV_FAILURE);
6722 
6723 	mutex_enter(&cmn->nvs_slock);
6724 	cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6725 	mutex_exit(&cmn->nvs_slock);
6726 
6727 	return (NV_SUCCESS);
6728 }
6729 
6730 /*
6731  * nv_sgp_csr_read
6732  * This is just a 32-bit port read from the value that was obtained from the
6733  * PCI config space.
6734  *
6735  * XXX It was advised to use the in[bwl] function for this, even though they
6736  * are obsolete interfaces.
6737  */
6738 static int
6739 nv_sgp_csr_read(nv_ctl_t *nvc)
6740 {
6741 	return (inl(nvc->nvc_sgp_csr));
6742 }
6743 
6744 /*
6745  * nv_sgp_csr_write
6746  * This is just a 32-bit I/O port write.  The port number was obtained from
6747  * the PCI config space.
6748  *
6749  * XXX It was advised to use the out[bwl] function for this, even though they
6750  * are obsolete interfaces.
6751  */
6752 static void
6753 nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6754 {
6755 	outl(nvc->nvc_sgp_csr, val);
6756 }
6757 
6758 /*
6759  * nv_sgp_write_data
6760  * Cause SGPIO to send Control Block data
6761  */
6762 static int
6763 nv_sgp_write_data(nv_ctl_t *nvc)
6764 {
6765 	hrtime_t start, end;
6766 	uint32_t status;
6767 	uint32_t cmd;
6768 
6769 	/* issue command */
6770 	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6771 	nv_sgp_csr_write(nvc, cmd);
6772 
6773 	/* poll for completion */
6774 	start = gethrtime();
6775 	end = start + NV_SGP_CMD_TIMEOUT;
6776 	for (;;) {
6777 		status = nv_sgp_csr_read(nvc);
6778 
6779 		/* break on error completion */
6780 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6781 			break;
6782 
6783 		/* break on successful completion */
6784 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6785 			break;
6786 
6787 		/* Wait 400 ns and try again */
6788 		NV_DELAY_NSEC(400);
6789 
6790 		if (gethrtime() > end)
6791 			break;
6792 	}
6793 
6794 	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6795 		return (NV_SUCCESS);
6796 
6797 	return (NV_FAILURE);
6798 }
6799 
6800 /*
6801  * nv_sgp_activity_led_ctl
6802  * This is run as a taskq.  It wakes up at a fixed interval and checks to
6803  * see if any of the activity LEDs need to be changed.
6804  */
6805 static void
6806 nv_sgp_activity_led_ctl(void *arg)
6807 {
6808 	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6809 	nv_sgp_cmn_t *cmn;
6810 	volatile nv_sgp_cb_t *cbp;
6811 	clock_t ticks;
6812 	uint8_t drv_leds;
6813 	uint32_t old_leds;
6814 	uint32_t new_led_state;
6815 	int i;
6816 
6817 	cmn = nvc->nvc_sgp_cmn;
6818 	cbp = nvc->nvc_sgp_cbp;
6819 
6820 	do {
6821 		/* save off the old state of all of the LEDs */
6822 		old_leds = cbp->sgpio0_tr;
6823 
6824 		DTRACE_PROBE3(sgpio__activity__state,
6825 		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6826 		    int, old_leds);
6827 
6828 		new_led_state = 0;
6829 
6830 		/* for each drive */
6831 		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6832 
6833 			/* get the current state of the LEDs for the drive */
6834 			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6835 
6836 			if ((cmn->nvs_connected & (1 << i)) == 0) {
6837 				/* if not connected, turn off activity */
6838 				drv_leds &= ~TR_ACTIVE_MASK;
6839 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6840 
6841 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6842 				new_led_state |=
6843 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6844 
6845 				continue;
6846 			}
6847 
6848 			if ((cmn->nvs_activity & (1 << i)) == 0) {
6849 				/* connected, but not active */
6850 				drv_leds &= ~TR_ACTIVE_MASK;
6851 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6852 
6853 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6854 				new_led_state |=
6855 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6856 
6857 				continue;
6858 			}
6859 
6860 			/* connected and active */
6861 			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6862 				/* was enabled, so disable */
6863 				drv_leds &= ~TR_ACTIVE_MASK;
6864 				drv_leds |=
6865 				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6866 
6867 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6868 				new_led_state |=
6869 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6870 			} else {
6871 				/* was disabled, so enable */
6872 				drv_leds &= ~TR_ACTIVE_MASK;
6873 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6874 
6875 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6876 				new_led_state |=
6877 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6878 			}
6879 
6880 			/*
6881 			 * clear the activity bit
6882 			 * if there is drive activity again within the
6883 			 * loop interval (now 1/16 second), nvs_activity
6884 			 * will be reset and the "connected and active"
6885 			 * condition above will cause the LED to blink
6886 			 * off and on at the loop interval rate.  The
6887 			 * rate may be increased (interval shortened) as
6888 			 * long as it is not more than 1/30 second.
6889 			 */
6890 			mutex_enter(&cmn->nvs_slock);
6891 			cmn->nvs_activity &= ~(1 << i);
6892 			mutex_exit(&cmn->nvs_slock);
6893 		}
6894 
6895 		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6896 
6897 		/* write out LED values */
6898 
6899 		mutex_enter(&cmn->nvs_slock);
6900 		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6901 		cbp->sgpio0_tr |= new_led_state;
6902 		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6903 		mutex_exit(&cmn->nvs_slock);
6904 
6905 		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6906 			NVLOG((NVDBG_VERBOSE, nvc, NULL,
6907 			    "nv_sgp_write_data failure updating active LED"));
6908 		}
6909 
6910 		/* now rest for the interval */
6911 		mutex_enter(&cmn->nvs_tlock);
6912 		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6913 		if (ticks > 0)
6914 			(void) cv_reltimedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6915 			    ticks, TR_CLOCK_TICK);
6916 		mutex_exit(&cmn->nvs_tlock);
6917 	} while (ticks > 0);
6918 }
6919 
6920 /*
6921  * nv_sgp_drive_connect
6922  * Set the flag used to indicate that the drive is attached to the HBA.
6923  * Used to let the taskq know that it should turn the Activity LED on.
6924  */
6925 static void
6926 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6927 {
6928 	nv_sgp_cmn_t *cmn;
6929 
6930 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6931 		return;
6932 	cmn = nvc->nvc_sgp_cmn;
6933 
6934 	mutex_enter(&cmn->nvs_slock);
6935 	cmn->nvs_connected |= (1 << drive);
6936 	mutex_exit(&cmn->nvs_slock);
6937 }
6938 
6939 /*
6940  * nv_sgp_drive_disconnect
6941  * Clears the flag used to indicate that the drive is no longer attached
6942  * to the HBA.  Used to let the taskq know that it should turn the
6943  * Activity LED off.  The flag that indicates that the drive is in use is
6944  * also cleared.
6945  */
6946 static void
6947 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6948 {
6949 	nv_sgp_cmn_t *cmn;
6950 
6951 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6952 		return;
6953 	cmn = nvc->nvc_sgp_cmn;
6954 
6955 	mutex_enter(&cmn->nvs_slock);
6956 	cmn->nvs_connected &= ~(1 << drive);
6957 	cmn->nvs_activity &= ~(1 << drive);
6958 	mutex_exit(&cmn->nvs_slock);
6959 }
6960 
6961 /*
6962  * nv_sgp_drive_active
6963  * Sets the flag used to indicate that the drive has been accessed and the
6964  * LED should be flicked off, then on.  It is cleared at a fixed time
6965  * interval by the LED taskq and set by the sata command start.
6966  */
6967 static void
6968 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6969 {
6970 	nv_sgp_cmn_t *cmn;
6971 
6972 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6973 		return;
6974 	cmn = nvc->nvc_sgp_cmn;
6975 
6976 	DTRACE_PROBE1(sgpio__active, int, drive);
6977 
6978 	mutex_enter(&cmn->nvs_slock);
6979 	cmn->nvs_activity |= (1 << drive);
6980 	mutex_exit(&cmn->nvs_slock);
6981 }
6982 
6983 
6984 /*
6985  * nv_sgp_locate
6986  * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
6987  * maintained in the SGPIO Control Block.
6988  */
6989 static void
6990 nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
6991 {
6992 	uint8_t leds;
6993 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6994 	nv_sgp_cmn_t *cmn;
6995 
6996 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6997 		return;
6998 	cmn = nvc->nvc_sgp_cmn;
6999 
7000 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7001 		return;
7002 
7003 	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
7004 
7005 	mutex_enter(&cmn->nvs_slock);
7006 
7007 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7008 
7009 	leds &= ~TR_LOCATE_MASK;
7010 	leds |= TR_LOCATE_SET(value);
7011 
7012 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7013 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7014 
7015 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7016 
7017 	mutex_exit(&cmn->nvs_slock);
7018 
7019 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7020 		nv_cmn_err(CE_WARN, nvc, NULL,
7021 		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
7022 	}
7023 }
7024 
7025 /*
7026  * nv_sgp_error
7027  * Turns the Error/Failure LED off or on for a particular drive.  State is
7028  * maintained in the SGPIO Control Block.
7029  */
7030 static void
7031 nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
7032 {
7033 	uint8_t leds;
7034 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7035 	nv_sgp_cmn_t *cmn;
7036 
7037 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7038 		return;
7039 	cmn = nvc->nvc_sgp_cmn;
7040 
7041 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7042 		return;
7043 
7044 	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
7045 
7046 	mutex_enter(&cmn->nvs_slock);
7047 
7048 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7049 
7050 	leds &= ~TR_ERROR_MASK;
7051 	leds |= TR_ERROR_SET(value);
7052 
7053 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7054 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7055 
7056 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7057 
7058 	mutex_exit(&cmn->nvs_slock);
7059 
7060 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7061 		nv_cmn_err(CE_WARN, nvc, NULL,
7062 		    "!nv_sgp_write_data failure updating Fail/Error LED");
7063 	}
7064 }
7065 
/*
 * nv_sgp_cleanup
 * Tear down this controller's use of SGPIO: turn off its two activity
 * LEDs, drop its bit from the shared "in use" mask, and — when this was
 * the last controller using the shared space — stop the LED taskq, turn
 * all LEDs off, scrub the CBP-to-cmn mapping table and free the shared
 * data structure.  Finally unmap the SGPIO Control Block.
 */
static void
nv_sgp_cleanup(nv_ctl_t *nvc)
{
	int drive, i;
	uint8_t drv_leds;
	uint32_t led_state;
	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
	extern void psm_unmap_phys(caddr_t, size_t);

	/*
	 * If the SGPIO Control Block isn't mapped or the shared data
	 * structure isn't present in this instance, there isn't much that
	 * can be cleaned up.
	 */
	if ((cb == NULL) || (cmn == NULL))
		return;

	/* turn off activity LEDs for this controller */
	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

	/* get the existing LED state */
	led_state = cb->sgpio0_tr;

	/* turn off port 0 */
	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
	led_state &= SGPIO0_TR_DRV_CLR(drive);
	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);

	/* turn off port 1 */
	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
	led_state &= SGPIO0_TR_DRV_CLR(drive);
	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);

	/*
	 * set the new led state, which should turn off this ctrl's LEDs
	 * NOTE(review): led_state is computed above but never written
	 * back to cb->sgpio0_tr before this write-out — confirm whether
	 * that is intentional (the all-off write below covers the
	 * last-user case).
	 */
	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
	(void) nv_sgp_write_data(nvc);

	/* clear the controller's in use bit */
	mutex_enter(&cmn->nvs_slock);
	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
	mutex_exit(&cmn->nvs_slock);

	/*
	 * NOTE(review): nvs_in_use is re-read here without nvs_slock;
	 * presumably safe because detach of the last instance is
	 * single-threaded — verify against the detach path.
	 */
	if (cmn->nvs_in_use == 0) {
		/* if all "in use" bits cleared, take everything down */

		if (cmn->nvs_taskq != NULL) {
			/* allow activity taskq to exit */
			cmn->nvs_taskq_delay = 0;
			cv_broadcast(&cmn->nvs_cv);

			/* then destroy it (waits for the task to finish) */
			ddi_taskq_destroy(cmn->nvs_taskq);
		}

		/* turn off all of the LEDs */
		cb->sgpio0_tr = 0;
		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
		(void) nv_sgp_write_data(nvc);

		/* clear the scratch-register pointer to the shared space */
		cb->sgpio_sr = NULL;

		/* zero out the CBP to cmn mapping */
		for (i = 0; i < NV_MAX_CBPS; i++) {
			if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
				nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
				break;
			}

			/* a zero CBP marks the end of the used entries */
			if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
				break;
		}

		/* free resources */
		cv_destroy(&cmn->nvs_cv);
		mutex_destroy(&cmn->nvs_tlock);
		mutex_destroy(&cmn->nvs_slock);

		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
	}

	nvc->nvc_sgp_cmn = NULL;

	/* unmap the SGPIO Control Block */
	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
}
7152 #endif	/* SGPIO_SUPPORT */
7153