xref: /illumos-gate/usr/src/uts/common/io/sdcard/adapters/sdhost/sdhost.c (revision 581cede61ac9c14d8d4ea452562a567189eead78)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include "sdhost.h"
27 
28 typedef	struct sdstats	sdstats_t;
29 typedef	struct sdslot	sdslot_t;
30 typedef	struct sdhost	sdhost_t;
31 
/*
 * Per-slot statistics, exported as named kstats (see the
 * kstat_named_init() calls in sdhost_init_slot() for the external
 * names).  The first eight are 64-bit counters; the rest are 32-bit
 * configuration/status values.
 */
struct sdstats {
	kstat_named_t	ks_ncmd;	/* "ncmd": commands issued */
	kstat_named_t	ks_ixfr;	/* "ixfr": inbound transfers */
	kstat_named_t	ks_oxfr;	/* "oxfr": outbound transfers */
	kstat_named_t	ks_ibytes;	/* "ibytes": inbound byte count */
	kstat_named_t	ks_obytes;	/* "obytes": outbound byte count */
	kstat_named_t	ks_npio;	/* "npio": transfers done via PIO */
	kstat_named_t	ks_ndma;	/* "ndma": transfers done via DMA */
	kstat_named_t	ks_nmulti;	/* "nmulti": multi-block transfers */
	kstat_named_t	ks_baseclk;	/* "baseclk": base clock, Hz */
	kstat_named_t	ks_cardclk;	/* "cardclk": current card clock, Hz */
	kstat_named_t	ks_tmusecs;	/* "tmusecs": timeout clock units */
	kstat_named_t	ks_width;	/* "width": current bus width */
	kstat_named_t	ks_flags;	/* "flags": SDFLAG_FORCE_* bits */
	kstat_named_t	ks_capab;	/* "capab": raw capability bits */
};
48 
49 #define	SDFLAG_FORCE_PIO		(1U << 0)
50 #define	SDFLAG_FORCE_DMA		(1U << 1)
51 
/*
 * Per slot state.  Fields are protected by ss_lock; the ss_* macros
 * below alias the kstat values in ss_stats so the driver can update
 * them directly.
 */
struct sdslot {
	sda_host_t		*ss_host;	/* SDA framework handle */
	int			ss_num;		/* slot index; -1 = uninitialized */
	ddi_acc_handle_t	ss_acch;	/* register access handle */
	caddr_t 		ss_regva;	/* mapped slot register base */
	kmutex_t		ss_lock;	/* protects slot state */
	uint8_t			ss_tmoutclk;	/* timeout clock factor -- see timeout calc */
	uint32_t		ss_ocr;		/* OCR formatted voltages */
	uint16_t		ss_mode;	/* XFR_MODE_* bits of current transfer */
	boolean_t		ss_suspended;	/* B_TRUE while DDI_SUSPENDed */
	sdstats_t		ss_stats;	/* kstat-exported statistics */
#define	ss_ncmd			ss_stats.ks_ncmd.value.ui64
#define	ss_ixfr			ss_stats.ks_ixfr.value.ui64
#define	ss_oxfr			ss_stats.ks_oxfr.value.ui64
#define	ss_ibytes		ss_stats.ks_ibytes.value.ui64
#define	ss_obytes		ss_stats.ks_obytes.value.ui64
#define	ss_ndma			ss_stats.ks_ndma.value.ui64
#define	ss_npio			ss_stats.ks_npio.value.ui64
#define	ss_nmulti		ss_stats.ks_nmulti.value.ui64

#define	ss_baseclk		ss_stats.ks_baseclk.value.ui32
#define	ss_cardclk		ss_stats.ks_cardclk.value.ui32
#define	ss_tmusecs		ss_stats.ks_tmusecs.value.ui32
#define	ss_width		ss_stats.ks_width.value.ui32
#define	ss_flags		ss_stats.ks_flags.value.ui32
#define	ss_capab		ss_stats.ks_capab.value.ui32
	kstat_t			*ss_ksp;	/* kstat handle (may be NULL) */

	/*
	 * Command in progress
	 */
	uint8_t			*ss_kvaddr;	/* next kernel address to copy to/from */
	int			ss_blksz;	/* transfer block size, bytes */
	uint16_t		ss_resid;	/* in blocks */
	int			ss_rcnt;	/* DMA read bytes to copy back on completion */

	/* scratch buffer, to receive extra PIO data */
	caddr_t			ss_bounce;	/* bounce buffer kernel va */
	ddi_dma_handle_t	ss_bufdmah;	/* bounce buffer DMA handle */
	ddi_acc_handle_t	ss_bufacch;	/* bounce buffer memory handle */
	ddi_dma_cookie_t	ss_bufdmac;	/* bounce DMA cookie (single) */
};
97 
98 /*
99  * This allocates a rather large chunk of contiguous memory for DMA.
100  * But doing so means that we'll almost never have to resort to PIO.
101  */
102 #define	SDHOST_BOUNCESZ		65536
103 
/*
 * Per controller state.  One instance per PCI function; slots are
 * statically embedded and indexed 0 .. sh_numslots-1.
 */
struct sdhost {
	int			sh_numslots;	/* from SLOTINFO; <= SDHOST_MAXSLOTS */
	ddi_dma_attr_t		sh_dmaattr;	/* SDMA attributes (set in attach) */
	sdslot_t		sh_slots[SDHOST_MAXSLOTS];
	sda_host_t		*sh_host;	/* SDA framework handle */

	/*
	 * Interrupt related information.
	 */
	ddi_intr_handle_t	sh_ihandle;	/* NULL until allocated */
	int			sh_icap;	/* intr capabilities (BLOCK etc.) */
	uint_t			sh_ipri;	/* interrupt priority */
};
120 
121 #define	PROPSET(x)							\
122 	(ddi_prop_get_int(DDI_DEV_T_ANY, dip,				\
123 	DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, x, 0) != 0)
124 
125 
126 static int sdhost_attach(dev_info_t *, ddi_attach_cmd_t);
127 static int sdhost_detach(dev_info_t *, ddi_detach_cmd_t);
128 static int sdhost_quiesce(dev_info_t *);
129 static int sdhost_suspend(dev_info_t *);
130 static int sdhost_resume(dev_info_t *);
131 
132 static void sdhost_enable_interrupts(sdslot_t *);
133 static void sdhost_disable_interrupts(sdslot_t *);
134 static int sdhost_setup_intr(dev_info_t *, sdhost_t *);
135 static uint_t sdhost_intr(caddr_t, caddr_t);
136 static int sdhost_init_slot(dev_info_t *, sdhost_t *, int, int);
137 static void sdhost_uninit_slot(sdhost_t *, int);
138 static sda_err_t sdhost_soft_reset(sdslot_t *, uint8_t);
139 static sda_err_t sdhost_set_clock(sdslot_t *, uint32_t);
140 static void sdhost_xfer_done(sdslot_t *, sda_err_t);
141 static sda_err_t sdhost_wait_cmd(sdslot_t *, sda_cmd_t *);
142 static uint_t sdhost_slot_intr(sdslot_t *);
143 
144 static sda_err_t sdhost_cmd(void *, sda_cmd_t *);
145 static sda_err_t sdhost_getprop(void *, sda_prop_t, uint32_t *);
146 static sda_err_t sdhost_setprop(void *, sda_prop_t, uint32_t);
147 static sda_err_t sdhost_poll(void *);
148 static sda_err_t sdhost_reset(void *);
149 static sda_err_t sdhost_halt(void *);
150 
151 static struct dev_ops sdhost_dev_ops = {
152 	DEVO_REV,			/* devo_rev */
153 	0,				/* devo_refcnt */
154 	ddi_no_info,			/* devo_getinfo */
155 	nulldev,			/* devo_identify */
156 	nulldev,			/* devo_probe */
157 	sdhost_attach,			/* devo_attach */
158 	sdhost_detach,			/* devo_detach */
159 	nodev,				/* devo_reset */
160 	NULL,				/* devo_cb_ops */
161 	NULL,				/* devo_bus_ops */
162 	NULL,				/* devo_power */
163 	sdhost_quiesce,			/* devo_quiesce */
164 };
165 
166 static struct modldrv sdhost_modldrv = {
167 	&mod_driverops,			/* drv_modops */
168 	"Standard SD Host Controller",	/* drv_linkinfo */
169 	&sdhost_dev_ops			/* drv_dev_ops */
170 };
171 
172 static struct modlinkage modlinkage = {
173 	MODREV_1,			/* ml_rev */
174 	{ &sdhost_modldrv, NULL }	/* ml_linkage */
175 };
176 
177 static struct sda_ops sdhost_ops = {
178 	SDA_OPS_VERSION,
179 	sdhost_cmd,			/* so_cmd */
180 	sdhost_getprop,			/* so_getprop */
181 	sdhost_setprop,			/* so_setprop */
182 	sdhost_poll,			/* so_poll */
183 	sdhost_reset,			/* so_reset */
184 	sdhost_halt,			/* so_halt */
185 };
186 
187 static ddi_device_acc_attr_t sdhost_regattr = {
188 	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
189 	DDI_STRUCTURE_LE_ACC,	/* devacc_attr_endian_flags */
190 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
191 	DDI_DEFAULT_ACC,	/* devacc_attr_access */
192 };
193 static ddi_device_acc_attr_t sdhost_bufattr = {
194 	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
195 	DDI_NEVERSWAP_ACC,	/* devacc_attr_endian_flags */
196 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
197 	DDI_DEFAULT_ACC,	/* devacc_attr_access */
198 };
199 
200 #define	GET16(ss, reg)	\
201 	ddi_get16(ss->ss_acch, (void *)(ss->ss_regva + reg))
202 #define	PUT16(ss, reg, val)	\
203 	ddi_put16(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
204 #define	GET32(ss, reg)	\
205 	ddi_get32(ss->ss_acch, (void *)(ss->ss_regva + reg))
206 #define	PUT32(ss, reg, val)	\
207 	ddi_put32(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
208 #define	GET64(ss, reg)	\
209 	ddi_get64(ss->ss_acch, (void *)(ss->ss_regva + reg))
210 
211 #define	GET8(ss, reg)	\
212 	ddi_get8(ss->ss_acch, (void *)(ss->ss_regva + reg))
213 #define	PUT8(ss, reg, val)	\
214 	ddi_put8(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
215 
216 #define	CLR8(ss, reg, mask)	PUT8(ss, reg, GET8(ss, reg) & ~(mask))
217 #define	SET8(ss, reg, mask)	PUT8(ss, reg, GET8(ss, reg) | (mask))
218 
219 /*
220  * If ever anyone uses PIO on SPARC, we have to endian-swap.  But we
221  * think that SD Host Controllers are likely to be uncommon on SPARC,
222  * and hopefully when they exist at all they will be able to use DMA.
223  */
224 #ifdef	_BIG_ENDIAN
225 #define	sw32(x)		ddi_swap32(x)
226 #define	sw16(x)		ddi_swap16(x)
227 #else
228 #define	sw32(x)		(x)
229 #define	sw16(x)		(x)
230 #endif
231 
232 #define	GETDATA32(ss)		sw32(GET32(ss, REG_DATA))
233 #define	GETDATA16(ss)		sw16(GET16(ss, REG_DATA))
234 #define	GETDATA8(ss)		GET8(ss, REG_DATA)
235 
236 #define	PUTDATA32(ss, val)	PUT32(ss, REG_DATA, sw32(val))
237 #define	PUTDATA16(ss, val)	PUT16(ss, REG_DATA, sw16(val))
238 #define	PUTDATA8(ss, val)	PUT8(ss, REG_DATA, val)
239 
240 #define	CHECK_STATE(ss, nm)	\
241 	((GET32(ss, REG_PRS) & PRS_ ## nm) != 0)
242 
243 int
244 _init(void)
245 {
246 	int	rv;
247 
248 	sda_host_init_ops(&sdhost_dev_ops);
249 
250 	if ((rv = mod_install(&modlinkage)) != 0) {
251 		sda_host_fini_ops(&sdhost_dev_ops);
252 	}
253 
254 	return (rv);
255 }
256 
257 int
258 _fini(void)
259 {
260 	int	rv;
261 
262 	if ((rv = mod_remove(&modlinkage)) == 0) {
263 		sda_host_fini_ops(&sdhost_dev_ops);
264 	}
265 	return (rv);
266 }
267 
268 int
269 _info(struct modinfo *modinfop)
270 {
271 	return (mod_info(&modlinkage, modinfop));
272 }
273 
/*
 * attach(9E) entry point.  Allocates the per-controller soft state,
 * reads the slot count and BAR layout from PCI config space, sets up
 * a single interrupt, registers with the SDA framework, initializes
 * each slot, and finally enables interrupts and attaches the host.
 * DDI_RESUME is routed to sdhost_resume(); all other commands fail.
 * On any failure everything allocated so far is torn down.
 */
int
sdhost_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	sdhost_t		*shp;
	ddi_acc_handle_t	pcih;
	uint8_t			slotinfo;
	uint8_t			bar;
	int			i;
	int			rv;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		return (sdhost_resume(dip));

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Soft state allocation.
	 */
	shp = kmem_zalloc(sizeof (*shp), KM_SLEEP);
	ddi_set_driver_private(dip, shp);

	/*
	 * Reset the "slot number", so uninit slot works properly.
	 */
	for (i = 0; i < SDHOST_MAXSLOTS; i++) {
		shp->sh_slots[i].ss_num = -1;
	}

	/*
	 * Initialize DMA attributes.  For now we initialize as for
	 * SDMA.  If we add ADMA support we can improve this.
	 */
	shp->sh_dmaattr.dma_attr_version = DMA_ATTR_V0;
	shp->sh_dmaattr.dma_attr_addr_lo = 0;
	shp->sh_dmaattr.dma_attr_addr_hi = 0xffffffffU;
	shp->sh_dmaattr.dma_attr_count_max = 0xffffffffU;
	shp->sh_dmaattr.dma_attr_align = 4096;		/* Ricoh needs it */
	shp->sh_dmaattr.dma_attr_burstsizes = 0;	/* for now! */
	shp->sh_dmaattr.dma_attr_minxfer = 1;
	shp->sh_dmaattr.dma_attr_maxxfer = 0x7ffffU;
	shp->sh_dmaattr.dma_attr_sgllen = 1;		/* no scatter/gather */
	shp->sh_dmaattr.dma_attr_seg = 0x7ffffU;	/* not to cross 512K */
	shp->sh_dmaattr.dma_attr_granular = 1;
	shp->sh_dmaattr.dma_attr_flags = 0;

	/*
	 * PCI configuration access to figure out number of slots present.
	 */
	if (pci_config_setup(dip, &pcih) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "pci_config_setup failed");
		goto failed;
	}

	slotinfo = pci_config_get8(pcih, SLOTINFO);
	shp->sh_numslots = SLOTINFO_NSLOT(slotinfo);

	if (shp->sh_numslots > SDHOST_MAXSLOTS) {
		cmn_err(CE_WARN, "Host reports to have too many slots: %d",
		    shp->sh_numslots);
		pci_config_teardown(&pcih);
		goto failed;
	}

	/*
	 * Enable master accesses and DMA.
	 */
	pci_config_put16(pcih, PCI_CONF_COMM,
	    pci_config_get16(pcih, PCI_CONF_COMM) |
	    PCI_COMM_MAE | PCI_COMM_ME);

	/*
	 * Figure out which BAR to use.  Note that we number BARs from
	 * 1, although PCI and SD Host numbers from 0.  (We number
	 * from 1, because register number 0 means PCI configuration
	 * space in Solaris.)
	 */
	bar = SLOTINFO_BAR(slotinfo) + 1;

	pci_config_teardown(&pcih);

	/*
	 * Setup interrupts ... supports the new DDI interrupt API.  This
	 * will support MSI or MSI-X interrupts if a device is found to
	 * support it.
	 */
	if (sdhost_setup_intr(dip, shp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed to setup interrupts");
		goto failed;
	}

	shp->sh_host = sda_host_alloc(dip, shp->sh_numslots, &sdhost_ops,
	    &shp->sh_dmaattr);
	if (shp->sh_host == NULL) {
		cmn_err(CE_WARN, "Failed allocating SD host structure");
		goto failed;
	}

	/*
	 * Configure slots, this also maps registers, enables
	 * interrupts, etc.  Most of the hardware setup is done here.
	 */
	for (i = 0; i < shp->sh_numslots; i++) {
		if (sdhost_init_slot(dip, shp, i, bar + i) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "Failed initializing slot %d", i);
			goto failed;
		}
	}

	ddi_report_dev(dip);

	/*
	 * Enable device interrupts at the DDI layer.
	 */
	if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
		rv = ddi_intr_block_enable(&shp->sh_ihandle, 1);
	} else {
		rv = ddi_intr_enable(shp->sh_ihandle);
	}
	if (rv != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed enabling interrupts");
		goto failed;
	}

	/*
	 * Mark the slots online with the framework.  This will cause
	 * the framework to probe them for the presence of cards.
	 */
	if (sda_host_attach(shp->sh_host) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed attaching to SDA framework");
		/* interrupts were already live; quiet them first */
		if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
			(void) ddi_intr_block_disable(&shp->sh_ihandle, 1);
		} else {
			(void) ddi_intr_disable(shp->sh_ihandle);
		}
		goto failed;
	}

	return (DDI_SUCCESS);

failed:
	/* common unwind path; each step tolerates partial initialization */
	if (shp->sh_ihandle != NULL) {
		(void) ddi_intr_remove_handler(shp->sh_ihandle);
		(void) ddi_intr_free(shp->sh_ihandle);
	}
	for (i = 0; i < shp->sh_numslots; i++)
		sdhost_uninit_slot(shp, i);
	if (shp->sh_host != NULL)
		sda_host_free(shp->sh_host);
	kmem_free(shp, sizeof (*shp));

	return (DDI_FAILURE);
}
432 
/*
 * detach(9E) entry point.  Detaches the host from the SDA framework,
 * tears down the interrupt, uninitializes every slot, and frees the
 * soft state.  DDI_SUSPEND is routed to sdhost_suspend(); all other
 * commands fail.
 */
int
sdhost_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	sdhost_t	*shp;
	int		i;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (sdhost_suspend(dip));

	default:
		return (DDI_FAILURE);
	}

	shp = ddi_get_driver_private(dip);

	/*
	 * Take host offline with the framework.
	 */
	sda_host_detach(shp->sh_host);

	/*
	 * Tear down interrupts.
	 */
	if (shp->sh_ihandle != NULL) {
		if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
			(void) ddi_intr_block_disable(&shp->sh_ihandle, 1);
		} else {
			(void) ddi_intr_disable(shp->sh_ihandle);
		}
		(void) ddi_intr_remove_handler(shp->sh_ihandle);
		(void) ddi_intr_free(shp->sh_ihandle);
	}

	/*
	 * Tear down register mappings, etc.
	 */
	for (i = 0; i < shp->sh_numslots; i++)
		sdhost_uninit_slot(shp, i);
	sda_host_free(shp->sh_host);
	kmem_free(shp, sizeof (*shp));

	return (DDI_SUCCESS);
}
480 
481 int
482 sdhost_quiesce(dev_info_t *dip)
483 {
484 	sdhost_t	*shp;
485 	sdslot_t	*ss;
486 
487 	shp = ddi_get_driver_private(dip);
488 
489 	/* reset each slot separately */
490 	for (int i = 0; i < shp->sh_numslots; i++) {
491 		ss = &shp->sh_slots[i];
492 		if (ss->ss_acch == NULL)
493 			continue;
494 
495 		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
496 	}
497 	return (DDI_SUCCESS);
498 }
499 
500 int
501 sdhost_suspend(dev_info_t *dip)
502 {
503 	sdhost_t	*shp;
504 	sdslot_t	*ss;
505 	int		i;
506 
507 	shp = ddi_get_driver_private(dip);
508 
509 	sda_host_suspend(shp->sh_host);
510 
511 	for (i = 0; i < shp->sh_numslots; i++) {
512 		ss = &shp->sh_slots[i];
513 		mutex_enter(&ss->ss_lock);
514 		ss->ss_suspended = B_TRUE;
515 		sdhost_disable_interrupts(ss);
516 		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
517 		mutex_exit(&ss->ss_lock);
518 	}
519 	return (DDI_SUCCESS);
520 }
521 
522 int
523 sdhost_resume(dev_info_t *dip)
524 {
525 	sdhost_t	*shp;
526 	sdslot_t	*ss;
527 	int		i;
528 
529 	shp = ddi_get_driver_private(dip);
530 
531 	for (i = 0; i < shp->sh_numslots; i++) {
532 		ss = &shp->sh_slots[i];
533 		mutex_enter(&ss->ss_lock);
534 		ss->ss_suspended = B_FALSE;
535 		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
536 		sdhost_enable_interrupts(ss);
537 		mutex_exit(&ss->ss_lock);
538 	}
539 
540 	sda_host_resume(shp->sh_host);
541 
542 	return (DDI_SUCCESS);
543 }
544 
/*
 * Program the slot's card clock to at most hz Hz.  A rate of 0 simply
 * turns the clock off.  The divider is the power of two that brings
 * the base clock down to <= hz; the rate actually achieved is cached
 * in ss_cardclk.  Returns SDA_EINVAL if no base clock is known, and
 * SDA_ETIME if the internal clock fails to stabilize within ~100 msec.
 */
sda_err_t
sdhost_set_clock(sdslot_t *ss, uint32_t hz)
{
	uint16_t	div;
	uint32_t	val;
	uint32_t	clk;
	int		count;

	/*
	 * Shut off the clock to begin.
	 */
	ss->ss_cardclk = 0;
	PUT16(ss, REG_CLOCK_CONTROL, 0);
	if (hz == 0) {
		return (SDA_EOK);
	}

	if (ss->ss_baseclk == 0) {
		sda_host_log(ss->ss_host, ss->ss_num,
		    "Base clock frequency not established.");
		return (SDA_EINVAL);
	}

	if ((hz > 25000000) && ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0)) {
		/* this clock requires high speed timings! */
		SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
	} else {
		/* don't allow clock to run faster than 25MHz */
		hz = min(hz, 25000000);
		CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
	}

	/* figure out the divider: halve the clock until it fits under hz */
	clk = ss->ss_baseclk;
	div  = 1;
	while (clk > hz) {
		if (div > 0x80)
			break;
		clk >>= 1;	/* divide clock by two */
		div <<= 1;	/* divider goes up by one */
	}
	div >>= 1;	/* 0 == divide by 1, 1 = divide by 2 */

	/*
	 * Set the internal clock divider first, without enabling the
	 * card clock yet.
	 */
	PUT16(ss, REG_CLOCK_CONTROL,
	    (div << CLOCK_CONTROL_FREQ_SHIFT) | CLOCK_CONTROL_INT_CLOCK_EN);

	/*
	 * Wait up to 100 msec for the internal clock to stabilize.
	 * (The spec does not seem to indicate a maximum timeout, but
	 * it also suggests that an infinite loop be used, which is
	 * not appropriate for hardened Solaris drivers.)
	 */
	for (count = 100000; count; count -= 10) {

		val = GET16(ss, REG_CLOCK_CONTROL);

		if (val & CLOCK_CONTROL_INT_CLOCK_STABLE) {
			/* if clock is stable, enable the SD clock pin */
			PUT16(ss, REG_CLOCK_CONTROL, val |
			    CLOCK_CONTROL_SD_CLOCK_EN);

			ss->ss_cardclk = clk;
			return (SDA_EOK);
		}

		drv_usecwait(10);
	}

	return (SDA_ETIME);
}
619 
/*
 * Issue the given SOFT_RESET_* bits and busy-wait up to ~100 msec
 * for the controller to clear them.  Returns SDA_EOK on completion
 * or SDA_ETIME if the bits never clear.
 */
sda_err_t
sdhost_soft_reset(sdslot_t *ss, uint8_t bits)
{
	int	count;

	/*
	 * There appears to be a bug where Ricoh hosts might have a
	 * problem if the host frequency is not set.  If the card
	 * isn't present, or we are doing a master reset, just enable
	 * the internal clock at its native speed.  (No dividers, and
	 * not exposed to card.).
	 */
	if ((bits == SOFT_RESET_ALL) || !(CHECK_STATE(ss, CARD_INSERTED))) {
		PUT16(ss, REG_CLOCK_CONTROL, CLOCK_CONTROL_INT_CLOCK_EN);
		/* simple 1msec wait, don't wait for clock to stabilize */
		drv_usecwait(1000);
		/*
		 * reset the card clock & width -- master reset also
		 * resets these
		 */
		ss->ss_cardclk = 0;
		ss->ss_width = 1;
	}


	PUT8(ss, REG_SOFT_RESET, bits);
	for (count = 100000; count != 0; count -= 10) {
		if ((GET8(ss, REG_SOFT_RESET) & bits) == 0) {
			return (SDA_EOK);
		}
		drv_usecwait(10);
	}

	return (SDA_ETIME);
}
655 
/*
 * Mask and disable all card-detect and error interrupts for a slot
 * (e.g. ahead of suspend).  Caller holds the slot lock.
 */
void
sdhost_disable_interrupts(sdslot_t *ss)
{
	/* disable slot interrupts for card insert and remove */
	PUT16(ss, REG_INT_MASK, 0);
	PUT16(ss, REG_INT_EN, 0);

	/* disable error interrupts */
	PUT16(ss, REG_ERR_MASK, 0);
	PUT16(ss, REG_ERR_EN, 0);
}
667 
/*
 * Enable the card-detect and error interrupts a slot normally runs
 * with.  Command completion is deliberately polled, not interrupt
 * driven (see note below).
 */
void
sdhost_enable_interrupts(sdslot_t *ss)
{
	/*
	 * Note that we want to enable reading of the CMD related
	 * bits, but we do not want them to generate an interrupt.
	 * (The busy wait for typical CMD stuff will normally be less
	 * than 10usec, so its simpler/easier to just poll.  Even in
	 * the worst case of 100 kHz, the poll is at worst 2 msec.)
	 */

	/* enable slot interrupts for card insert and remove */
	PUT16(ss, REG_INT_MASK, INT_MASK);
	PUT16(ss, REG_INT_EN, INT_ENAB);

	/* enable error interrupts */
	PUT16(ss, REG_ERR_MASK, ERR_MASK);
	PUT16(ss, REG_ERR_EN, ERR_ENAB);
}
687 
/*
 * Allocate and install a single interrupt for the controller, trying
 * MSI-X, then MSI, then FIXED.  MSI/MSI-X are suppressed by default
 * (broken on some parts) unless explicitly enabled via properties.
 * On success sh_ihandle, sh_ipri and sh_icap are valid; the interrupt
 * is installed but not yet enabled -- attach does the enable.
 */
int
sdhost_setup_intr(dev_info_t *dip, sdhost_t *shp)
{
	int		itypes;
	int		itype;

	/*
	 * Set up interrupt handler.
	 */
	if (ddi_intr_get_supported_types(dip, &itypes) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_intr_get_supported_types failed");
		return (DDI_FAILURE);
	}

	/*
	 * It turns out that some controllers don't properly implement MSI,
	 * but advertise MSI capability in their  PCI config space.
	 *
	 * While this is really a chip-specific bug, the simplest solution
	 * is to just suppress MSI for now by default -- every device seen
	 * so far can use FIXED interrupts.
	 *
	 * We offer an override property, though, just in case someone really
	 * wants to force it.
	 *
	 * We don't do this if the FIXED type isn't supported though!
	 */
	if (itypes & DDI_INTR_TYPE_FIXED) {
		if (!PROPSET(SDHOST_PROP_ENABLE_MSI)) {
			itypes &= ~DDI_INTR_TYPE_MSI;
		}
		if (!PROPSET(SDHOST_PROP_ENABLE_MSIX)) {
			itypes &= ~DDI_INTR_TYPE_MSIX;
		}
	}

	/*
	 * Interrupt types are bits in a mask.  We know about these ones:
	 * FIXED = 1
	 * MSI = 2
	 * MSIX = 4
	 */
	for (itype = DDI_INTR_TYPE_MSIX; itype != 0; itype >>= 1) {

		int			count;

		if ((itypes & itype) == 0) {
			/* this type is not supported on this device! */
			continue;
		}

		if ((ddi_intr_get_nintrs(dip, itype, &count) != DDI_SUCCESS) ||
		    (count == 0)) {
			cmn_err(CE_WARN, "ddi_intr_get_nintrs failed");
			continue;
		}

		/*
		 * We have not seen a host device with multiple
		 * interrupts (one per slot?), and the spec does not
		 * indicate that they exist.  But if one ever occurs,
		 * we spew a warning to help future debugging/support
		 * efforts.
		 */
		if (count > 1) {
			cmn_err(CE_WARN, "Controller offers %d interrupts, "
			    "but driver only supports one", count);
			continue;
		}

		if ((ddi_intr_alloc(dip, &shp->sh_ihandle, itype, 0, 1,
		    &count, DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS) ||
		    (count != 1)) {
			cmn_err(CE_WARN, "ddi_intr_alloc failed");
			continue;
		}

		if (ddi_intr_get_pri(shp->sh_ihandle, &shp->sh_ipri) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_get_pri failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		/* a high-level interrupt would forbid our mutex usage */
		if (shp->sh_ipri >= ddi_intr_get_hilevel_pri()) {
			cmn_err(CE_WARN, "Hi level interrupt not supported");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (ddi_intr_get_cap(shp->sh_ihandle, &shp->sh_icap) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_get_cap failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (ddi_intr_add_handler(shp->sh_ihandle, sdhost_intr,
		    shp, NULL) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_add_handler failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
801 
802 void
803 sdhost_xfer_done(sdslot_t *ss, sda_err_t errno)
804 {
805 	if ((errno == SDA_EOK) && (ss->ss_resid != 0)) {
806 		/* an unexpected partial transfer was found */
807 		errno = SDA_ERESID;
808 	}
809 	ss->ss_blksz = 0;
810 	ss->ss_resid = 0;
811 
812 	if (errno != SDA_EOK) {
813 		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
814 		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);
815 
816 		/* send a STOP command if necessary */
817 		if (ss->ss_mode & XFR_MODE_AUTO_CMD12) {
818 			PUT32(ss, REG_ARGUMENT, 0);
819 			PUT16(ss, REG_COMMAND,
820 			    (CMD_STOP_TRANSMIT << 8) |
821 			    COMMAND_TYPE_NORM | COMMAND_INDEX_CHECK_EN |
822 			    COMMAND_CRC_CHECK_EN | COMMAND_RESP_48_BUSY);
823 		}
824 	}
825 
826 	sda_host_transfer(ss->ss_host, ss->ss_num, errno);
827 }
828 
/*
 * Per-slot interrupt service routine.  Handles, in order: card
 * insert/remove detection (which short-circuits further processing),
 * DMA boundary acknowledgment, PIO read and write block transfers
 * staged through the bounce buffer, transfer completion, and error
 * status.  Returns DDI_INTR_CLAIMED if any interrupt status was
 * pending for this slot, DDI_INTR_UNCLAIMED otherwise (including
 * while suspended).
 */
uint_t
sdhost_slot_intr(sdslot_t *ss)
{
	uint16_t	intr;
	uint16_t	errs;
	caddr_t		data;
	int		count;

	mutex_enter(&ss->ss_lock);

	/* a suspended slot cannot be the interrupt source */
	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	intr = GET16(ss, REG_INT_STAT);
	if (intr == 0) {
		mutex_exit(&ss->ss_lock);
		return (DDI_INTR_UNCLAIMED);
	}
	errs = GET16(ss, REG_ERR_STAT);

	if (intr & (INT_REM | INT_INS)) {

		PUT16(ss, REG_INT_STAT, intr);
		mutex_exit(&ss->ss_lock);

		sda_host_detect(ss->ss_host, ss->ss_num);
		/* no further interrupt processing this cycle */
		return (DDI_INTR_CLAIMED);
	}

	if (intr & INT_DMA) {
		/*
		 * We have crossed a DMA/page boundary.  Cope with it.
		 */
		/*
		 * Apparently some sdhost controllers issue a final
		 * DMA interrupt if the DMA completes on a boundary,
		 * even though there is no further data to transfer.
		 *
		 * There might be a risk here of the controller
		 * continuing to access the same data over and over
		 * again, but we accept the risk.
		 */
		PUT16(ss, REG_INT_STAT, INT_DMA);
	}

	if (intr & INT_RD) {
		/*
		 * PIO read!  PIO is quite suboptimal, but we expect
		 * performance critical applications to use DMA
		 * whenever possible.  We have to stage this through
		 * the bounce buffer to meet alignment considerations.
		 */

		PUT16(ss, REG_INT_STAT, INT_RD);

		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_RD_EN)) {

			data = ss->ss_bounce;
			count = ss->ss_blksz;

			ASSERT(count > 0);
			ASSERT(ss->ss_kvaddr != NULL);

			/* drain the FIFO in the widest chunks possible */
			while (count >= sizeof (uint32_t)) {
				*(uint32_t *)(void *)data = GETDATA32(ss);
				data += sizeof (uint32_t);
				count -= sizeof (uint32_t);
			}
			while (count >= sizeof (uint16_t)) {
				*(uint16_t *)(void *)data = GETDATA16(ss);
				data += sizeof (uint16_t);
				count -= sizeof (uint16_t);
			}
			while (count >= sizeof (uint8_t)) {
				*(uint8_t *)data = GETDATA8(ss);
				data += sizeof (uint8_t);
				count -= sizeof (uint8_t);
			}

			bcopy(ss->ss_bounce, ss->ss_kvaddr, ss->ss_blksz);
			ss->ss_kvaddr += ss->ss_blksz;
			ss->ss_resid--;
		}
	}

	if (intr & INT_WR) {
		/*
		 * PIO write!  PIO is quite suboptimal, but we expect
		 * performance critical applications to use DMA
		 * whenever possible.  We have to stage this through
		 * the bounce buffer to meet alignment considerations.
		 */

		PUT16(ss, REG_INT_STAT, INT_WR);

		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_WR_EN)) {

			data = ss->ss_bounce;
			count = ss->ss_blksz;

			ASSERT(count > 0);
			ASSERT(ss->ss_kvaddr != NULL);

			bcopy(ss->ss_kvaddr, data, count);
			/* feed the FIFO in the widest chunks possible */
			while (count >= sizeof (uint32_t)) {
				PUTDATA32(ss, *(uint32_t *)(void *)data);
				data += sizeof (uint32_t);
				count -= sizeof (uint32_t);
			}
			while (count >= sizeof (uint16_t)) {
				PUTDATA16(ss, *(uint16_t *)(void *)data);
				data += sizeof (uint16_t);
				count -= sizeof (uint16_t);
			}
			while (count >= sizeof (uint8_t)) {
				PUTDATA8(ss, *(uint8_t *)data);
				data += sizeof (uint8_t);
				count -= sizeof (uint8_t);
			}

			ss->ss_kvaddr += ss->ss_blksz;
			ss->ss_resid--;
		}
	}

	if (intr & INT_XFR) {
		/* for DMA reads, copy the data back out of the bounce buffer */
		if ((ss->ss_mode & (XFR_MODE_READ | XFR_MODE_DMA_EN)) ==
		    (XFR_MODE_READ | XFR_MODE_DMA_EN)) {
			(void) ddi_dma_sync(ss->ss_bufdmah, 0, 0,
			    DDI_DMA_SYNC_FORKERNEL);
			bcopy(ss->ss_bounce, ss->ss_kvaddr, ss->ss_rcnt);
			ss->ss_rcnt = 0;
		}
		PUT16(ss, REG_INT_STAT, INT_XFR);

		sdhost_xfer_done(ss, SDA_EOK);
	}

	if (intr & INT_ERR) {
		PUT16(ss, REG_ERR_STAT, errs);
		PUT16(ss, REG_INT_STAT, INT_ERR);

		if (errs & ERR_DAT) {
			if ((errs & ERR_DAT_END) == ERR_DAT_END) {
				sdhost_xfer_done(ss, SDA_EPROTO);
			} else if ((errs & ERR_DAT_CRC) == ERR_DAT_CRC) {
				sdhost_xfer_done(ss, SDA_ECRC7);
			} else {
				sdhost_xfer_done(ss, SDA_ETIME);
			}

		} else if (errs & ERR_ACMD12) {
			/*
			 * Generally, this is bad news.  we need a full
			 * reset to recover properly.
			 */
			sdhost_xfer_done(ss, SDA_ECMD12);
		}

		/*
		 * This asynchronous error leaves the slot more or less
		 * useless.  Report it to the framework.
		 */
		if (errs & ERR_CURRENT) {
			sda_host_fault(ss->ss_host, ss->ss_num,
			    SDA_FAULT_CURRENT);
		}
	}

	mutex_exit(&ss->ss_lock);

	return (DDI_INTR_CLAIMED);
}
1005 
1006 /*ARGSUSED1*/
1007 uint_t
1008 sdhost_intr(caddr_t arg1, caddr_t arg2)
1009 {
1010 	sdhost_t	*shp = (void *)arg1;
1011 	int		rv = DDI_INTR_UNCLAIMED;
1012 	int		num;
1013 
1014 	/* interrupt for each of the slots present in the system */
1015 	for (num = 0; num < shp->sh_numslots; num++) {
1016 		if (sdhost_slot_intr(&shp->sh_slots[num]) ==
1017 		    DDI_INTR_CLAIMED) {
1018 			rv = DDI_INTR_CLAIMED;
1019 		}
1020 	}
1021 	return (rv);
1022 }
1023 
1024 int
1025 sdhost_init_slot(dev_info_t *dip, sdhost_t *shp, int num, int bar)
1026 {
1027 	sdslot_t	*ss;
1028 	uint32_t	capab;
1029 	uint32_t	clk;
1030 	char		ksname[16];
1031 	size_t		blen;
1032 	unsigned	ndmac;
1033 	int		rv;
1034 
1035 	/*
1036 	 * Register the private state.
1037 	 */
1038 	ss = &shp->sh_slots[num];
1039 	ss->ss_host = shp->sh_host;
1040 	ss->ss_num = num;
1041 	sda_host_set_private(shp->sh_host, num, ss);
1042 	/*
1043 	 * Initialize core data structure, locks, etc.
1044 	 */
1045 	mutex_init(&ss->ss_lock, NULL, MUTEX_DRIVER,
1046 	    DDI_INTR_PRI(shp->sh_ipri));
1047 
1048 	/*
1049 	 * Set up DMA.
1050 	 */
1051 	rv = ddi_dma_alloc_handle(dip, &shp->sh_dmaattr,
1052 	    DDI_DMA_SLEEP, NULL, &ss->ss_bufdmah);
1053 	if (rv != DDI_SUCCESS) {
1054 		cmn_err(CE_WARN, "Failed to alloc dma handle (%d)!", rv);
1055 		return (DDI_FAILURE);
1056 	}
1057 
1058 	rv = ddi_dma_mem_alloc(ss->ss_bufdmah, SDHOST_BOUNCESZ,
1059 	    &sdhost_bufattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1060 	    &ss->ss_bounce, &blen, &ss->ss_bufacch);
1061 	if (rv != DDI_SUCCESS) {
1062 		cmn_err(CE_WARN, "Failed to alloc bounce buffer (%d)!", rv);
1063 		return (DDI_FAILURE);
1064 	}
1065 
1066 	rv = ddi_dma_addr_bind_handle(ss->ss_bufdmah, NULL, ss->ss_bounce,
1067 	    blen, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1068 	    &ss->ss_bufdmac, &ndmac);
1069 	if ((rv != DDI_DMA_MAPPED) || (ndmac != 1)) {
1070 		cmn_err(CE_WARN, "Failed to bind DMA bounce buffer (%d, %u)!",
1071 		    rv, ndmac);
1072 		return (DDI_FAILURE);
1073 	}
1074 
1075 	/*
1076 	 * Set up virtual kstats.
1077 	 */
1078 	(void) snprintf(ksname, sizeof (ksname), "slot%d", num);
1079 	ss->ss_ksp = kstat_create(ddi_driver_name(dip), ddi_get_instance(dip),
1080 	    ksname, "misc", KSTAT_TYPE_NAMED,
1081 	    sizeof (sdstats_t) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
1082 	if (ss->ss_ksp != NULL) {
1083 		sdstats_t	*sp = &ss->ss_stats;
1084 		ss->ss_ksp->ks_data = sp;
1085 		ss->ss_ksp->ks_private = ss;
1086 		ss->ss_ksp->ks_lock = &ss->ss_lock;
1087 		/* counters are 64 bits wide */
1088 		kstat_named_init(&sp->ks_ncmd, "ncmd", KSTAT_DATA_UINT64);
1089 		kstat_named_init(&sp->ks_ixfr, "ixfr", KSTAT_DATA_UINT64);
1090 		kstat_named_init(&sp->ks_oxfr, "oxfr", KSTAT_DATA_UINT64);
1091 		kstat_named_init(&sp->ks_ibytes, "ibytes", KSTAT_DATA_UINT64);
1092 		kstat_named_init(&sp->ks_obytes, "obytes", KSTAT_DATA_UINT64);
1093 		kstat_named_init(&sp->ks_npio, "npio", KSTAT_DATA_UINT64);
1094 		kstat_named_init(&sp->ks_ndma, "ndma", KSTAT_DATA_UINT64);
1095 		kstat_named_init(&sp->ks_nmulti, "nmulti", KSTAT_DATA_UINT64);
1096 		/* these aren't counters -- leave them at 32 bits */
1097 		kstat_named_init(&sp->ks_baseclk, "baseclk", KSTAT_DATA_UINT32);
1098 		kstat_named_init(&sp->ks_cardclk, "cardclk", KSTAT_DATA_UINT32);
1099 		kstat_named_init(&sp->ks_tmusecs, "tmusecs", KSTAT_DATA_UINT32);
1100 		kstat_named_init(&sp->ks_width, "width", KSTAT_DATA_UINT32);
1101 		kstat_named_init(&sp->ks_flags, "flags", KSTAT_DATA_UINT32);
1102 		kstat_named_init(&sp->ks_capab, "capab", KSTAT_DATA_UINT32);
1103 		kstat_install(ss->ss_ksp);
1104 	}
1105 
1106 	if (PROPSET(SDHOST_PROP_FORCE_PIO)) {
1107 		ss->ss_flags |= SDFLAG_FORCE_PIO;
1108 	}
1109 	if (PROPSET(SDHOST_PROP_FORCE_DMA)) {
1110 		ss->ss_flags |= SDFLAG_FORCE_DMA;
1111 	}
1112 
1113 	if (ddi_regs_map_setup(dip, bar, &ss->ss_regva, 0, 0, &sdhost_regattr,
1114 	    &ss->ss_acch) != DDI_SUCCESS) {
1115 		cmn_err(CE_WARN, "Failed to map registers!");
1116 		return (DDI_FAILURE);
1117 	}
1118 
1119 	/* reset before reading capabilities */
1120 	if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK)
1121 		return (DDI_FAILURE);
1122 
1123 	capab = GET64(ss, REG_CAPAB) & 0xffffffffU; /* upper bits reserved */
1124 	ss->ss_capab = capab;
1125 
1126 	/* host voltages in OCR format */
1127 	ss->ss_ocr = 0;
1128 	if (capab & CAPAB_18V)
1129 		ss->ss_ocr |= OCR_18_19V;	/* 1.8V */
1130 	if (capab & CAPAB_30V)
1131 		ss->ss_ocr |= OCR_30_31V;
1132 	if (capab & CAPAB_33V)
1133 		ss->ss_ocr |= OCR_32_33V;
1134 
1135 	/* base clock */
1136 	ss->ss_baseclk =
1137 	    ((capab & CAPAB_BASE_FREQ_MASK) >> CAPAB_BASE_FREQ_SHIFT);
1138 	ss->ss_baseclk *= 1000000;
1139 
1140 	/*
1141 	 * Timeout clock.  We can calculate this using the following
1142 	 * formula:
1143 	 *
1144 	 * (1000000 usec/1sec) * (1sec/tmoutclk) * base factor = clock time
1145 	 *
1146 	 * Clock time is the length of the base clock in usecs.
1147 	 *
1148 	 * Our base factor is 2^13, which is the shortest clock we
1149 	 * can count.
1150 	 *
1151 	 * To simplify the math and avoid overflow, we cancel out the
1152 	 * zeros for kHz or MHz.  Since we want to wait more clocks, not
1153 	 * less, on error, we truncate the result rather than rounding
1154 	 * up.
1155 	 */
1156 	clk = ((capab & CAPAB_TIMEOUT_FREQ_MASK) >> CAPAB_TIMEOUT_FREQ_SHIFT);
1157 	if ((ss->ss_baseclk == 0) || (clk == 0)) {
1158 		cmn_err(CE_WARN, "Unable to determine clock frequencies");
1159 		return (DDI_FAILURE);
1160 	}
1161 
1162 	if (capab & CAPAB_TIMEOUT_UNITS) {
1163 		/* MHz */
1164 		ss->ss_tmusecs = (1 << 13) / clk;
1165 		clk *= 1000000;
1166 	} else {
1167 		/* kHz */
1168 		ss->ss_tmusecs = (1000 * (1 << 13)) / clk;
1169 		clk *= 1000;
1170 	}
1171 
1172 	/*
1173 	 * Calculation of the timeout.
1174 	 *
1175 	 * SDIO cards use a 1sec timeout, and SDHC cards use fixed
1176 	 * 100msec for read and 250 msec for write.
1177 	 *
1178 	 * Legacy cards running at 375kHz have a worst case of about
1179 	 * 15 seconds.  Running at 25MHz (the standard speed) it is
1180 	 * about 100msec for read, and about 3.2 sec for write.
1181 	 * Typical values are 1/100th that, or about 1msec for read,
1182 	 * and 32 msec for write.
1183 	 *
1184 	 * No transaction at full speed should ever take more than 4
1185 	 * seconds.  (Some slow legacy cards might have trouble, but
1186 	 * we'll worry about them if they ever are seen.  Nobody wants
1187 	 * to wait 4 seconds to access a single block anyway!)
1188 	 *
1189 	 * To get to 4 seconds, we continuously double usec until we
1190 	 * get to the maximum value, or a timeout greater than 4
1191 	 * seconds.
1192 	 *
1193 	 * Note that for high-speed timeout clocks, we might not be
1194 	 * able to get to the full 4 seconds.  E.g. with a 48MHz
1195 	 * timeout clock, we can only get to about 2.8 seconds.  Its
1196 	 * possible that there could be some slow MMC cards that will
1197 	 * timeout at this clock rate, but it seems unlikely.  (The
1198 	 * device would have to be pressing the very worst times,
1199 	 * against the 100-fold "permissive" window allowed, and
1200 	 * running at only 12.5MHz.)
1201 	 *
1202 	 * XXX: this could easily be a tunable.  Someone dealing with only
1203 	 * reasonable cards could set this to just 1 second.
1204 	 */
1205 	for (ss->ss_tmoutclk = 0; ss->ss_tmoutclk < 14; ss->ss_tmoutclk++) {
1206 		if ((ss->ss_tmusecs * (1 << ss->ss_tmoutclk)) >= 4000000) {
1207 			break;
1208 		}
1209 	}
1210 
1211 	/*
1212 	 * Enable slot interrupts.
1213 	 */
1214 	sdhost_enable_interrupts(ss);
1215 
1216 	return (DDI_SUCCESS);
1217 }
1218 
/*
 * Tear down one slot's resources.  Init may have failed part way
 * through, so each resource is released only if it was actually
 * set up; ordering matters (quiesce hardware before freeing DMA).
 */
void
sdhost_uninit_slot(sdhost_t *shp, int num)
{
	sdslot_t	*ss;

	ss = &shp->sh_slots[num];

	/* registers mapped: quiesce the controller before freeing anything */
	if (ss->ss_acch != NULL)
		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);

	/* non-zero cookie address means the bounce buffer is still bound */
	if (ss->ss_bufdmac.dmac_address)
		(void) ddi_dma_unbind_handle(ss->ss_bufdmah);

	if (ss->ss_bufacch != NULL)
		ddi_dma_mem_free(&ss->ss_bufacch);

	if (ss->ss_bufdmah != NULL)
		ddi_dma_free_handle(&ss->ss_bufdmah);

	if (ss->ss_ksp != NULL)
		kstat_delete(ss->ss_ksp);

	if (ss->ss_acch != NULL)
		ddi_regs_map_free(&ss->ss_acch);

	/*
	 * NOTE(review): ss_num != -1 appears to mark a slot whose mutex
	 * was initialized (ss_num is assigned during init) — confirm
	 * against the part of sdhost_init_slot not visible here.
	 */
	if (ss->ss_num != -1)
		mutex_destroy(&ss->ss_lock);
}
1247 
1248 void
1249 sdhost_get_response(sdslot_t *ss, sda_cmd_t *cmdp)
1250 {
1251 	uint32_t	*resp = cmdp->sc_response;
1252 	int		i;
1253 
1254 	resp[0] = GET32(ss, REG_RESP1);
1255 	resp[1] = GET32(ss, REG_RESP2);
1256 	resp[2] = GET32(ss, REG_RESP3);
1257 	resp[3] = GET32(ss, REG_RESP4);
1258 
1259 	/*
1260 	 * Response 2 is goofy because the host drops the low
1261 	 * order CRC bits.  This makes it a bit awkward, so we
1262 	 * have to shift the bits to make it work out right.
1263 	 *
1264 	 * Note that the framework expects the 32 bit
1265 	 * words to be ordered in LE fashion.  (The
1266 	 * bits within the words are in native order).
1267 	 */
1268 	if (cmdp->sc_rtype == R2) {
1269 		for (i = 3; i > 0; i--) {
1270 			resp[i] <<= 8;
1271 			resp[i] |= (resp[i - 1] >> 24);
1272 		}
1273 		resp[0] <<= 8;
1274 	}
1275 }
1276 
/*
 * Busy-wait (up to ~3 msec) for the command just written to the
 * command register to complete.  On success, collects the response
 * and returns SDA_EOK.  On any failure (device error or our own
 * timeout expiring), resets the CMD and DAT lines and returns the
 * appropriate error.  Called with ss_lock held (see sdhost_cmd).
 */
sda_err_t
sdhost_wait_cmd(sdslot_t *ss, sda_cmd_t *cmdp)
{
	int		i;
	uint16_t	errs;
	sda_err_t	rv;

	/*
	 * Worst case for 100kHz timeout is 2msec (200 clocks), we add
	 * a tiny bit for safety.  (Generally timeout will be far, far
	 * less than that.)
	 *
	 * Note that at more typical 12MHz (and normally it will be
	 * even faster than that!) that the device timeout is only
	 * 16.67 usec.  We could be smarter and reduce the delay time,
	 * but that would require putting more intelligence into the
	 * code, and we don't expect CMD timeout to normally occur
	 * except during initialization.  (At which time we need the
	 * full timeout anyway.)
	 *
	 * Checking the ERR_STAT will normally cause the timeout to
	 * terminate to finish early if the device is healthy, anyway.
	 */

	/* poll in 5 usec steps; 3000 usec total budget */
	for (i = 3000; i > 0; i -= 5) {
		if (GET16(ss, REG_INT_STAT) & INT_CMD) {

			/* acknowledge the completion interrupt */
			PUT16(ss, REG_INT_STAT, INT_CMD);

			/* command completed */
			sdhost_get_response(ss, cmdp);
			return (SDA_EOK);
		}

		if ((errs = (GET16(ss, REG_ERR_STAT) & ERR_CMD)) != 0) {
			/* acknowledge only the error bits we observed */
			PUT16(ss, REG_ERR_STAT, errs);

			/* command timeout isn't a host failure */
			if ((errs & ERR_CMD_TMO) == ERR_CMD_TMO) {
				rv = SDA_ETIME;
			} else if ((errs & ERR_CMD_CRC) == ERR_CMD_CRC) {
				rv = SDA_ECRC7;
			} else {
				rv = SDA_EPROTO;
			}
			goto error;
		}

		drv_usecwait(5);
	}

	/* our own polling budget expired without completion or error */
	rv = SDA_ETIME;

error:
	/*
	 * NB: We need to soft reset the CMD and DAT
	 * lines after a failure of this sort.
	 */
	(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
	(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);

	return (rv);
}
1340 
1341 sda_err_t
1342 sdhost_poll(void *arg)
1343 {
1344 	sdslot_t	*ss = arg;
1345 
1346 	(void) sdhost_slot_intr(ss);
1347 	return (SDA_EOK);
1348 }
1349 
/*
 * Issue a single command (and set up its optional data phase) on the
 * slot, then wait synchronously for the command portion to complete.
 * Returns SDA_ESUSPENDED if the slot is suspended, SDA_EINVAL for a
 * malformed request, or the result of sdhost_wait_cmd().  The data
 * phase itself completes later (presumably via slot interrupts /
 * sdhost_poll — the completion path is not visible here).
 */
sda_err_t
sdhost_cmd(void *arg, sda_cmd_t *cmdp)
{
	sdslot_t	*ss = arg;
	uint16_t	command;
	uint16_t	mode;
	sda_err_t	rv;

	/*
	 * Command register:
	 * bit 13-8	= command index
	 * bit 7-6	= command type (always zero for us!)
	 * bit 5	= data present select
	 * bit 4	= command index check (always on!)
	 * bit 3	= command CRC check enable
	 * bit 2	= reserved
	 * bit 1-0	= response type
	 */

	command = ((uint16_t)cmdp->sc_index << 8);
	command |= COMMAND_TYPE_NORM |
	    COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN;

	/* select response length, and whether index/CRC can be checked */
	switch (cmdp->sc_rtype) {
	case R0:
		command |= COMMAND_RESP_NONE;
		break;
	case R1:
	case R5:
	case R6:
	case R7:
		command |= COMMAND_RESP_48;
		break;
	case R1b:
	case R5b:
		command |= COMMAND_RESP_48_BUSY;
		break;
	case R2:
		/* 136-bit responses carry no index or CRC to verify */
		command |= COMMAND_RESP_136;
		command &= ~(COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN);
		break;
	case R3:
	case R4:
		command |= COMMAND_RESP_48;
		command &= ~COMMAND_CRC_CHECK_EN;
		command &= ~COMMAND_INDEX_CHECK_EN;
		break;
	default:
		return (SDA_EINVAL);
	}

	mutex_enter(&ss->ss_lock);
	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (SDA_ESUSPENDED);
	}

	/* non-zero block count means this command has a data phase */
	if (cmdp->sc_nblks != 0) {
		uint16_t	blksz;
		uint16_t	nblks;

		blksz = cmdp->sc_blksz;
		nblks = cmdp->sc_nblks;

		/*
		 * Ensure that we have good data.
		 */
		if ((blksz < 1) || (blksz > 2048)) {
			mutex_exit(&ss->ss_lock);
			return (SDA_EINVAL);
		}
		command |= COMMAND_DATA_PRESENT;

		ss->ss_blksz = blksz;

		ss->ss_kvaddr = (void *)cmdp->sc_kvaddr;
		ss->ss_rcnt = 0;
		ss->ss_resid = 0;

		/*
		 * Only SDMA for now.  We can investigate ADMA2 later.
		 * (Right now we don't have ADMA2 capable hardware.)
		 * We always use a bounce buffer, which solves weird
		 * problems with certain controllers.  Doing this with
		 * a large contiguous buffer may be faster than
		 * servicing all the little per-page interrupts
		 * anyway. (Bcopy of 64 K vs. 16 interrupts.)
		 */
		if (((ss->ss_capab & CAPAB_SDMA) != 0) &&
		    ((ss->ss_flags & SDFLAG_FORCE_PIO) == 0) &&
		    ((blksz * nblks) <= SDHOST_BOUNCESZ)) {

			if (cmdp->sc_flags & SDA_CMDF_WRITE) {
				/*
				 * if we're writing, prepare initial round
				 * of data
				 */
				bcopy(cmdp->sc_kvaddr, ss->ss_bounce,
				    nblks * blksz);
				(void) ddi_dma_sync(ss->ss_bufdmah, 0, 0,
				    DDI_DMA_SYNC_FORDEV);
			} else {
				/* reads: bytes to copy back on completion */
				ss->ss_rcnt = nblks * blksz;
			}
			PUT32(ss, REG_SDMA_ADDR, ss->ss_bufdmac.dmac_address);
			mode = XFR_MODE_DMA_EN;
			PUT16(ss, REG_BLKSZ, BLKSZ_BOUNDARY_512K | blksz);
			ss->ss_ndma++;

		} else {
			/* PIO path: track remaining blocks for the ISR */
			mode = 0;
			ss->ss_npio++;
			ss->ss_resid = nblks;
			PUT16(ss, REG_BLKSZ, blksz);
		}

		if (nblks > 1) {
			mode |= XFR_MODE_MULTI | XFR_MODE_COUNT;
			if (cmdp->sc_flags & SDA_CMDF_AUTO_CMD12)
				mode |= XFR_MODE_AUTO_CMD12;
			ss->ss_nmulti++;
		}
		if ((cmdp->sc_flags & SDA_CMDF_READ) != 0) {
			mode |= XFR_MODE_READ;
			ss->ss_ixfr++;
			ss->ss_ibytes += nblks * blksz;
		} else {
			ss->ss_oxfr++;
			ss->ss_obytes += nblks * blksz;
		}

		ss->ss_mode = mode;

		/* program the data-phase registers before the command */
		PUT8(ss, REG_TIMEOUT_CONTROL, ss->ss_tmoutclk);
		PUT16(ss, REG_BLOCK_COUNT, nblks);
		PUT16(ss, REG_XFR_MODE, mode);
	}

	/* writing the command register starts execution */
	PUT32(ss, REG_ARGUMENT, cmdp->sc_argument);
	PUT16(ss, REG_COMMAND, command);

	ss->ss_ncmd++;
	rv = sdhost_wait_cmd(ss, cmdp);

	mutex_exit(&ss->ss_lock);

	return (rv);
}
1498 
1499 sda_err_t
1500 sdhost_getprop(void *arg, sda_prop_t prop, uint32_t *val)
1501 {
1502 	sdslot_t	*ss = arg;
1503 	sda_err_t	rv = 0;
1504 
1505 	mutex_enter(&ss->ss_lock);
1506 
1507 	if (ss->ss_suspended) {
1508 		mutex_exit(&ss->ss_lock);
1509 		return (SDA_ESUSPENDED);
1510 	}
1511 	switch (prop) {
1512 	case SDA_PROP_INSERTED:
1513 		if (CHECK_STATE(ss, CARD_INSERTED)) {
1514 			*val = B_TRUE;
1515 		} else {
1516 			*val = B_FALSE;
1517 		}
1518 		break;
1519 
1520 	case SDA_PROP_WPROTECT:
1521 		if (CHECK_STATE(ss, WRITE_ENABLE)) {
1522 			*val = B_FALSE;
1523 		} else {
1524 			*val = B_TRUE;
1525 		}
1526 		break;
1527 
1528 	case SDA_PROP_OCR:
1529 		*val = ss->ss_ocr;
1530 		break;
1531 
1532 	case SDA_PROP_CLOCK:
1533 		*val = ss->ss_cardclk;
1534 		break;
1535 
1536 	case SDA_PROP_CAP_HISPEED:
1537 		if ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0) {
1538 			*val = B_TRUE;
1539 		} else {
1540 			*val = B_FALSE;
1541 		}
1542 		break;
1543 
1544 	case SDA_PROP_CAP_4BITS:
1545 		*val = B_TRUE;
1546 		break;
1547 
1548 	case SDA_PROP_CAP_NOPIO:
1549 		/*
1550 		 * We might have to use PIO for buffers that don't
1551 		 * have reasonable alignments.  A few controllers seem
1552 		 * not to deal with granularity or alignments of
1553 		 * something other 32-bits.
1554 		 */
1555 		*val = B_FALSE;
1556 		break;
1557 
1558 	case SDA_PROP_CAP_INTR:
1559 	case SDA_PROP_CAP_8BITS:
1560 		*val = B_FALSE;
1561 		break;
1562 
1563 	default:
1564 		rv = SDA_ENOTSUP;
1565 		break;
1566 	}
1567 	mutex_exit(&ss->ss_lock);
1568 
1569 	return (rv);
1570 }
1571 
/*
 * Set a slot property (LED, clock, bus width, bus power/voltage, or
 * high-speed mode).  Returns SDA_ESUSPENDED if the slot is suspended,
 * SDA_EINVAL for an unsupported value, SDA_ENOTSUP for an unknown
 * property, SDA_EOK otherwise.
 */
sda_err_t
sdhost_setprop(void *arg, sda_prop_t prop, uint32_t val)
{
	sdslot_t	*ss = arg;
	sda_err_t	rv = SDA_EOK;

	mutex_enter(&ss->ss_lock);

	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (SDA_ESUSPENDED);
	}

	switch (prop) {
	case SDA_PROP_LED:
		if (val) {
			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
		} else {
			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
		}
		break;

	case SDA_PROP_CLOCK:
		rv = sdhost_set_clock(arg, val);
		break;

	case SDA_PROP_BUSWIDTH:
		/* only 1-bit and 4-bit widths are supported here */
		switch (val) {
		case 1:
			ss->ss_width = val;
			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
			break;
		case 4:
			ss->ss_width = val;
			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
			break;
		default:
			rv = SDA_EINVAL;
		}
		break;

	case SDA_PROP_OCR:
		/* restrict the request to voltages the host supports */
		val &= ss->ss_ocr;

		/*
		 * Two-step sequence per voltage: first select the
		 * voltage, then enable bus power with that voltage.
		 */
		if (val & OCR_17_18V) {
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V);
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V |
			    POWER_CONTROL_BUS_POWER);
		} else if (val & OCR_29_30V) {
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V);
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V |
			    POWER_CONTROL_BUS_POWER);
		} else if (val & OCR_32_33V) {
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V);
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V |
			    POWER_CONTROL_BUS_POWER);
		} else if (val == 0) {
			/* turn off power */
			PUT8(ss, REG_POWER_CONTROL, 0);
		} else {
			rv = SDA_EINVAL;
		}
		break;

	case SDA_PROP_HISPEED:
		if (val) {
			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
		} else {
			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
		}
		/* give clocks time to settle */
		drv_usecwait(10);
		break;

	default:
		rv = SDA_ENOTSUP;
		break;
	}

	/*
	 * Apparently some controllers (ENE) have issues with changing
	 * certain parameters (bus width seems to be one), requiring
	 * a reset of the DAT and CMD lines.
	 */
	if (rv == SDA_EOK) {
		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);
	}
	mutex_exit(&ss->ss_lock);
	return (rv);
}
1663 
1664 sda_err_t
1665 sdhost_reset(void *arg)
1666 {
1667 	sdslot_t	*ss = arg;
1668 
1669 	mutex_enter(&ss->ss_lock);
1670 	if (!ss->ss_suspended) {
1671 		if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
1672 			mutex_exit(&ss->ss_lock);
1673 			return (SDA_ETIME);
1674 		}
1675 		sdhost_enable_interrupts(ss);
1676 	}
1677 	mutex_exit(&ss->ss_lock);
1678 	return (SDA_EOK);
1679 }
1680 
1681 sda_err_t
1682 sdhost_halt(void *arg)
1683 {
1684 	sdslot_t	*ss = arg;
1685 
1686 	mutex_enter(&ss->ss_lock);
1687 	if (!ss->ss_suspended) {
1688 		sdhost_disable_interrupts(ss);
1689 		/* this has the side effect of removing power from the card */
1690 		if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
1691 			mutex_exit(&ss->ss_lock);
1692 			return (SDA_ETIME);
1693 		}
1694 	}
1695 	mutex_exit(&ss->ss_lock);
1696 	return (SDA_EOK);
1697 }
1698