xref: /illumos-gate/usr/src/uts/common/os/devid_cache.c (revision 27dd1e87cd3d939264769dd4af7e6a529cde001f)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/note.h>
#include <sys/t_lock.h>
#include <sys/cmn_err.h>
#include <sys/instance.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/hwconf.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/kobj.h>
#include <sys/devcache.h>
#include <sys/devid_cache.h>
#include <sys/sysmacros.h>

/*
 * Discovery refers to the heroic effort made to discover a device which
 * cannot be accessed at the physical path where it once resided.  Discovery
 * involves walking the entire device tree, attaching all possible disk
 * instances, to search for the device referenced by a devid.  Obviously,
 * full device discovery is something to be avoided where possible.
 * Note that simply invoking devfsadm(1M) is equivalent to running full
 * discovery at the devid cache level.
 *
 * Reasons why a disk may not be accessible:
 *	disk powered off
 *	disk removed or cable disconnected
 *	disk or adapter broken
 *
 * Note that discovery is not needed and cannot succeed in any of these
 * cases.
 *
 * When discovery may succeed:
 *	Discovery will result in success when a device has been moved
 *	to a different address.  Note that it's recommended that
 *	devfsadm(1M) be invoked (no arguments required) whenever a system's
 *	h/w configuration has been updated.  Alternatively, a
 *	reconfiguration boot can be used to accomplish the same result.
 *
 * Note that discovery is not necessary to be able to correct an access
 * failure for a device which was powered off.  Assuming the cache has an
 * entry for such a device, simply powering it on should permit the system
 * to access it.  If problems persist after powering it on, invoke
 * devfsadm(1M).
 *
 * Discovery prior to mounting root is only of interest when booting
 * from a filesystem which accesses devices by device id, which
 * not all do.
 *
 * Tunables
 *
 * devid_discovery_boot (default 1)
 *	Number of times discovery will be attempted prior to mounting root.
 *	Must be done at least once to recover from corrupted or missing
 *	devid cache backing store.  There is probably no reason to ever
 *	set this greater than one, as a missing device will remain
 *	unavailable no matter how often the system searches for it.
 *
 * devid_discovery_postboot (default 1)
 *	Number of times discovery will be attempted after mounting root.
 *	This must be performed at least once to discover any devices
 *	needed after root is mounted that may have been powered
 *	off and moved before booting.
 *	Setting this to a larger positive number will introduce
 *	some inconsistency in system operation.  Searching for a device
 *	will take an indeterminate amount of time, sometimes slower,
 *	sometimes faster.  In addition, the system will sometimes
 *	discover a newly powered-on device, and sometimes it won't.
 *	Use of this option is therefore not recommended.
 *
 * devid_discovery_postboot_always (default 0)
 *	Set to 1, the system will always attempt full discovery.
 *
 * devid_discovery_secs (default 0)
 *	Set to a positive value, the system will attempt full discovery
 *	but with a minimum delay between attempts.  A device search
 *	within the specified interval of a previous attempt will fail
 *	without triggering another discovery.
 *
 * devid_cache_read_disable (default 0)
 *	Set to 1 to disable reading /etc/devices/devid_cache.
 *	Devid cache will continue to operate normally but
 *	at least one discovery attempt will be required.
 *
 * devid_cache_write_disable (default 0)
 *	Set to 1 to disable updates to /etc/devices/devid_cache.
 *	Any updates to the devid cache will not be preserved across a reboot.
 *
 * devid_report_error (default 0)
 *	Set to 1 to enable some error messages related to devid
 *	cache failures.
 *
 * The devid is packed in the cache file as a byte array.  For
 * portability, this could be done in the encoded string format.
 */
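
/*
 * For reference, tunables such as these are normally adjusted via
 * /etc/system.  A minimal sketch (assuming these variables are linked
 * into genunix, so no module prefix is needed; a reboot is required
 * for the settings to take effect):
 *
 *	set devid_discovery_postboot_always=1
 *	set devid_discovery_secs=60
 *	set devid_cache_write_disable=1
 */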


int devid_discovery_boot = 1;
int devid_discovery_postboot = 1;
int devid_discovery_postboot_always = 0;
int devid_discovery_secs = 0;

int devid_cache_read_disable = 0;
int devid_cache_write_disable = 0;

int devid_report_error = 0;


/*
 * State to manage discovery of devices providing a devid
 */
static int		devid_discovery_busy = 0;
static kmutex_t		devid_discovery_mutex;
static kcondvar_t	devid_discovery_cv;
static clock_t		devid_last_discovery = 0;


#ifdef	DEBUG
int nvp_devid_debug = 0;
int devid_debug = 0;
int devid_log_registers = 0;
int devid_log_finds = 0;
int devid_log_lookups = 0;
int devid_log_discovery = 0;
int devid_log_matches = 0;
int devid_log_paths = 0;
int devid_log_failures = 0;
int devid_log_hold = 0;
int devid_log_unregisters = 0;
int devid_log_removes = 0;
int devid_register_debug = 0;
int devid_log_stale = 0;
int devid_log_detaches = 0;
#endif	/* DEBUG */

/*
 * devid cache file registration for cache reads and updates
 */
static nvf_ops_t devid_cache_ops = {
	"/etc/devices/devid_cache",		/* path to cache */
	devid_cache_unpack_nvlist,		/* read: nvlist to nvp */
	devid_cache_pack_list,			/* write: nvp to nvlist */
	devid_list_free,			/* free data list */
	NULL					/* write complete callback */
};

/*
 * handle to registered devid cache handlers
 */
nvf_handle_t	dcfd_handle;


/*
 * Initialize devid cache file management
 */
void
devid_cache_init(void)
{
	dcfd_handle = nvf_register_file(&devid_cache_ops);
	ASSERT(dcfd_handle);

	list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
	    offsetof(nvp_devid_t, nvp_link));

	mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Read and initialize the devid cache from the persistent store
 */
void
devid_cache_read(void)
{
	if (!devid_cache_read_disable) {
		rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
		ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
		(void) nvf_read_file(dcfd_handle);
		rw_exit(nvf_lock(dcfd_handle));
	}
}

static void
devid_nvp_free(nvp_devid_t *dp)
{
	if (dp->nvp_devpath)
		kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath) + 1);
	if (dp->nvp_devid)
		kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));

	kmem_free(dp, sizeof (nvp_devid_t));
}

static void
devid_list_free(nvf_handle_t fd)
{
	list_t		*listp;
	nvp_devid_t	*np;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	listp = nvf_list(fd);
	while ((np = list_head(listp)) != NULL) {
		list_remove(listp, np);
		devid_nvp_free(np);
	}
}

/*
 * Unlink an nvp element from the list and free it
 */
static void
devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
{
	list_remove(nvf_list(fd), np);
	devid_nvp_free(np);
}

/*
 * Unpack a device path/nvlist pair to the list of devid cache elements.
 * Used to parse the nvlist format when reading
 * /etc/devices/devid_cache
 */
static int
devid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
{
	nvp_devid_t *np;
	ddi_devid_t devidp;
	int rval;
	uint_t n;

	NVP_DEVID_DEBUG_PATH((name));
	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	/*
	 * check path for a devid
	 */
	rval = nvlist_lookup_byte_array(nvl,
	    DP_DEVID_ID, (uchar_t **)&devidp, &n);
	if (rval == 0) {
		if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
			ASSERT(n == ddi_devid_sizeof(devidp));
			np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
			np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
			np->nvp_devid = kmem_alloc(n, KM_SLEEP);
			(void) bcopy(devidp, np->nvp_devid, n);
			list_insert_tail(nvf_list(fd), np);
			NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
		} else {
			DEVIDERR((CE_CONT,
			    "%s: invalid devid\n", name));
		}
	} else {
		DEVIDERR((CE_CONT,
		    "%s: devid not available\n", name));
	}

	return (0);
}

/*
 * Pack the list of devid cache elements into a single nvlist
 * Used when writing the nvlist file.
 */
static int
devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
{
	nvlist_t	*nvl, *sub_nvl;
	nvp_devid_t	*np;
	int		rval;
	list_t		*listp;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != 0) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(fd), rval);
		return (DDI_FAILURE);
	}

	listp = nvf_list(fd);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
		rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (rval != 0) {
			nvf_error("%s: nvlist alloc error %d\n",
			    nvf_cache_name(fd), rval);
			sub_nvl = NULL;
			goto err;
		}

		rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
		    (uchar_t *)np->nvp_devid,
		    ddi_devid_sizeof(np->nvp_devid));
		if (rval == 0) {
			NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
		} else {
			nvf_error(
			    "%s: nvlist add error %d (devid)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sublist)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}
		nvlist_free(sub_nvl);
	}

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

err:
	if (sub_nvl)
		nvlist_free(sub_nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}
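
/*
 * For illustration, the packed nvlist produced above (and consumed by
 * devid_cache_unpack_nvlist) has one sub-nvlist per cached path, keyed
 * by the device path, with the devid stored as a byte array under
 * DP_DEVID_ID.  Roughly:
 *
 *	top-level nvlist (NV_UNIQUE_NAME)
 *	    "/pci@0,0/pci1000,30a0@3/sd@0,0" -> sub-nvlist
 *		DP_DEVID_ID -> byte array (packed ddi_devid_t)
 *	    "/pci@0,0/pci1000,30a0@3/sd@1,0" -> sub-nvlist
 *		DP_DEVID_ID -> byte array (packed ddi_devid_t)
 *
 * The example paths above are illustrative only.
 */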

static int
e_devid_do_discovery(void)
{
	ASSERT(mutex_owned(&devid_discovery_mutex));

	if (i_ddi_io_initialized() == 0) {
		if (devid_discovery_boot > 0) {
			devid_discovery_boot--;
			return (1);
		}
	} else {
		if (devid_discovery_postboot_always > 0)
			return (1);
		if (devid_discovery_postboot > 0) {
			devid_discovery_postboot--;
			return (1);
		}
		if (devid_discovery_secs > 0) {
			if ((ddi_get_lbolt() - devid_last_discovery) >
			    drv_usectohz(devid_discovery_secs * MICROSEC)) {
				return (1);
			}
		}
	}

	DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
	return (0);
}
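
/*
 * Example (illustrative): with devid_discovery_secs = 60 and a 100Hz
 * clock, drv_usectohz(60 * MICROSEC) is 6000 ticks, so a lookup arriving
 * less than a minute after the previous discovery returns 0 above and
 * the caller fails fast instead of re-walking the device tree.
 */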

static void
e_ddi_devid_hold_by_major(major_t major)
{
	DEVID_LOG_DISC((CE_CONT,
	    "devid_discovery: ddi_hold_installed_driver %d\n", major));

	if (ddi_hold_installed_driver(major) == NULL)
		return;

	ddi_rele_driver(major);
}

static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd", "dad" };

#define	N_DRIVERS_TO_HOLD	\
	(sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *))


static void
e_ddi_devid_hold_installed_driver(ddi_devid_t devid)
{
	impl_devid_t	*id = (impl_devid_t *)devid;
	major_t		major, hint_major;
	char		hint[DEVID_HINT_SIZE + 1];
	char		**drvp;
	int		i;

	/* Count the significant bytes of the driver hint */
	for (i = 0; i < DEVID_HINT_SIZE; i++)
		if (id->did_driver[i] == '\0')
			break;

	/* Make a NUL-terminated copy of the driver hint */
	bcopy(id->did_driver, hint, i);
	hint[i] = '\0';

	/* search for the devid using the hint driver first */
	hint_major = ddi_name_to_major(hint);
	if (hint_major != DDI_MAJOR_T_NONE) {
		e_ddi_devid_hold_by_major(hint_major);
	}

	drvp = e_ddi_devid_hold_driver_list;
	for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) {
		major = ddi_name_to_major(*drvp);
		if (major != DDI_MAJOR_T_NONE && major != hint_major) {
			e_ddi_devid_hold_by_major(major);
		}
	}
}


/*
 * Return success if discovery was attempted, to indicate
 * that the desired device may now be available.
 */
int
e_ddi_devid_discovery(ddi_devid_t devid)
{
	int flags;
	int rval = DDI_SUCCESS;

	mutex_enter(&devid_discovery_mutex);

	if (devid_discovery_busy) {
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n"));
		while (devid_discovery_busy) {
			cv_wait(&devid_discovery_cv, &devid_discovery_mutex);
		}
	} else if (e_devid_do_discovery()) {
		devid_discovery_busy = 1;
		mutex_exit(&devid_discovery_mutex);

		if (i_ddi_io_initialized() == 0) {
			e_ddi_devid_hold_installed_driver(devid);
		} else {
			DEVID_LOG_DISC((CE_CONT,
			    "devid_discovery: ndi_devi_config\n"));
			flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
			if (i_ddi_io_initialized())
				flags |= NDI_DRV_CONF_REPROBE;
			(void) ndi_devi_config(ddi_root_node(), flags);
		}

		mutex_enter(&devid_discovery_mutex);
		devid_discovery_busy = 0;
		cv_broadcast(&devid_discovery_cv);
		if (devid_discovery_secs > 0)
			devid_last_discovery = ddi_get_lbolt();
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n"));
	} else {
		rval = DDI_FAILURE;
		DEVID_LOG_DISC((CE_CONT, "no devid discovery\n"));
	}

	mutex_exit(&devid_discovery_mutex);

	return (rval);
}

/*
 * As part of registering a devid for a device,
 * update the devid cache with this device/devid pair
 * or note that this combination is already registered.
 */
int
e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid)
{
	nvp_devid_t *np;
	nvp_devid_t *new_nvp;
	ddi_devid_t new_devid;
	int new_devid_size;
	char *path, *fullpath;
	ddi_devid_t free_devid = NULL;
	int pathlen;
	list_t *listp;
	int is_dirty = 0;

	/*
	 * We are willing to accept DS_BOUND nodes if we can form a full
	 * ddi_pathname (i.e. the node is part way to becoming
	 * DS_INITIALIZED and devi_addr/ddi_get_name_addr are non-NULL).
	 */
	if (ddi_get_name_addr(dip) == NULL) {
		return (DDI_FAILURE);
	}

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, fullpath);
	pathlen = strlen(fullpath) + 1;
	path = kmem_alloc(pathlen, KM_SLEEP);
	bcopy(fullpath, path, pathlen);
	kmem_free(fullpath, MAXPATHLEN);

	DEVID_LOG_REG(("register", devid, path));

	new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
	new_devid_size = ddi_devid_sizeof(devid);
	new_devid = kmem_alloc(new_devid_size, KM_SLEEP);
	(void) bcopy(devid, new_devid, new_devid_size);

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (strcmp(path, np->nvp_devpath) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "register: %s path match\n", path));
			if (np->nvp_devid == NULL) {
replace:			np->nvp_devid = new_devid;
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				if (!devid_cache_write_disable) {
					nvf_mark_dirty(dcfd_handle);
					is_dirty = 1;
				}
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				goto exit;
			}
			if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
				/* replace invalid devid */
				free_devid = np->nvp_devid;
				goto replace;
			}
			/*
			 * We're registering an already-cached path
			 * Does the device's devid match the cache?
			 */
			if (ddi_devid_compare(devid, np->nvp_devid) != 0) {
				DEVID_DEBUG((CE_CONT, "devid register: "
				    "devid %s does not match\n", path));
				/*
				 * Replace cached devid for this path
				 * with newly registered devid.  A devid
				 * may map to multiple paths but one path
				 * should only map to one devid.
				 */
				devid_nvp_unlink_and_free(dcfd_handle, np);
				np = NULL;
				break;
			} else {
				DEVID_DEBUG2((CE_CONT,
				    "devid register: %s devid match\n", path));
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				kmem_free(new_devid, new_devid_size);
				return (DDI_SUCCESS);
			}
		}
	}

	/*
	 * Add newly registered devid to the cache
	 */
	ASSERT(np == NULL);

	new_nvp->nvp_devpath = path;
	new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
	new_nvp->nvp_dip = dip;
	new_nvp->nvp_devid = new_devid;

	if (!devid_cache_write_disable) {
		is_dirty = 1;
		nvf_mark_dirty(dcfd_handle);
	}
	list_insert_tail(nvf_list(dcfd_handle), new_nvp);

	rw_exit(nvf_lock(dcfd_handle));

exit:
	if (free_devid)
		kmem_free(free_devid, ddi_devid_sizeof(free_devid));

	if (is_dirty)
		nvf_wake_daemon();

	return (DDI_SUCCESS);
}

/*
 * Unregister a device's devid
 * Called when an instance detaches
 * Invalidate the devid's devinfo reference
 * Devid-path remains in the cache
 */
void
e_devid_cache_unregister(dev_info_t *dip)
{
	nvp_devid_t *np;
	list_t *listp;

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) {
			DEVID_LOG_UNREG((CE_CONT,
			    "unregister: %s\n", np->nvp_devpath));
			np->nvp_flags &= ~NVP_DEVID_DIP;
			np->nvp_dip = NULL;
			break;
		}
	}

	rw_exit(nvf_lock(dcfd_handle));
}

/*
 * Purge devid cache of stale devids
 */
void
devid_cache_cleanup(void)
{
	nvp_devid_t *np, *next;
	list_t *listp;
	int is_dirty = 0;

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = next) {
		next = list_next(listp, np);
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) {
			DEVID_LOG_REMOVE((CE_CONT,
			    "cleanup: %s\n", np->nvp_devpath));
			if (!devid_cache_write_disable) {
				nvf_mark_dirty(dcfd_handle);
				is_dirty = 1;
			}
			devid_nvp_unlink_and_free(dcfd_handle, np);
		}
	}

	rw_exit(nvf_lock(dcfd_handle));

	if (is_dirty)
		nvf_wake_daemon();
}


/*
 * Build a list of dev_t's for a device/devid
 *
 * The effect of this function is cumulative, adding dev_t's
 * for the device to the list of all dev_t's for a given
 * devid.
 */
static void
e_devid_minor_to_devlist(
	dev_info_t	*dip,
	char		*minor_name,
	int		ndevts_alloced,
	int		*devtcntp,
	dev_t		*devtsp)
{
	int			circ;
	struct ddi_minor_data	*dmdp;
	int			minor_all = 0;
	int			ndevts = *devtcntp;

	ASSERT(i_ddi_devi_attached(dip));

	/* are we looking for a set of minor nodes? */
	if ((minor_name == DEVID_MINOR_NAME_ALL) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_BLK))
		minor_all = 1;
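
	/*
	 * Note: the DEVID_MINOR_NAME_ALL* comparisons above are pointer
	 * comparisons against well-known sentinel values, not string
	 * compares; callers pass these sentinels to request all minors,
	 * or all character/block minors, of the device.
	 */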

	/* Find matching minor names */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {

		/* Skip non-minors and non-matching minor names */
		if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
		    strcmp(dmdp->ddm_name, minor_name)))
			continue;

		/* filter out minor_all mismatches */
		if (minor_all &&
		    (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
		    (dmdp->ddm_spec_type != S_IFCHR)) ||
		    ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
		    (dmdp->ddm_spec_type != S_IFBLK))))
			continue;

		if (ndevts < ndevts_alloced)
			devtsp[ndevts] = dmdp->ddm_dev;
		ndevts++;
	}
	ndi_devi_exit(dip, circ);

	*devtcntp = ndevts;
}

/*
 * Search for cached entries matching a devid
 * Return two lists:
 *	a list of dev_info nodes, for those devices in the attached state
 *	a list of pathnames whose instances registered the given devid
 * If the lists passed in are not sufficient to return the matching
 * references, return the size of lists required.
 * The dev_info nodes are returned with a hold that the caller must release.
 */
static int
e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
	int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
{
	nvp_devid_t *np;
	int ndevis, npaths;
	dev_info_t *dip, *pdip;
	int circ;
	int maxdevis = 0;
	int maxpaths = 0;
	list_t *listp;

	ndevis = 0;
	npaths = 0;
	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "find: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}
		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "find: devid match: %s 0x%x\n",
			    np->nvp_devpath, np->nvp_flags));
			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));

			/*
			 * Check if we have a cached devinfo reference for this
			 * devid.  Place a hold on it to prevent detach.
			 * Otherwise, use the path instead.
			 * Note: returns with a hold on each dev_info
			 * node in the list.
			 */
			dip = NULL;
			if (np->nvp_flags & NVP_DEVID_DIP) {
				pdip = ddi_get_parent(np->nvp_dip);
				if (ndi_devi_tryenter(pdip, &circ)) {
					dip = np->nvp_dip;
					ndi_hold_devi(dip);
					ndi_devi_exit(pdip, circ);
					ASSERT(!DEVI_IS_ATTACHING(dip));
					ASSERT(!DEVI_IS_DETACHING(dip));
				} else {
					DEVID_LOG_DETACH((CE_CONT,
					    "may be detaching: %s\n",
					    np->nvp_devpath));
				}
			}

			if (dip) {
				if (ndevis < retmax) {
					retdevis[ndevis++] = dip;
				} else {
					ndi_rele_devi(dip);
				}
				maxdevis++;
			} else {
				if (npaths < retmax)
					retpaths[npaths++] = np->nvp_devpath;
				maxpaths++;
			}
		}
	}

	*retndevis = ndevis;
	*retnpaths = npaths;
	return (maxdevis > maxpaths ? maxdevis : maxpaths);
}


/*
 * Search the devid cache, returning dev_t list for all
 * device paths mapping to the device identified by the
 * given devid.
 *
 * Primary interface used by ddi_lyr_devid_to_devlist()
 */
int
e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
	int *retndevts, dev_t **retdevts)
{
	char		*path, **paths;
	int		i, j, n;
	dev_t		*devts, *udevts;
	dev_t		tdevt;
	int		ndevts, undevts, ndevts_alloced;
	dev_info_t	*devi, **devis;
	int		ndevis, npaths, nalloced;
	ddi_devid_t	match_devid;

	DEVID_LOG_FIND(("find", devid, NULL));

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
		DEVID_LOG_ERR(("invalid devid", devid, NULL));
		return (DDI_FAILURE);
	}

	nalloced = 128;

	for (;;) {
		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);

		rw_enter(nvf_lock(dcfd_handle), RW_READER);
		n = e_devid_cache_devi_path_lists(devid, nalloced,
		    &ndevis, devis, &npaths, paths);
		if (n <= nalloced)
			break;
		rw_exit(nvf_lock(dcfd_handle));
		for (i = 0; i < ndevis; i++)
			ndi_rele_devi(devis[i]);
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		nalloced = n + 128;
	}

	for (i = 0; i < npaths; i++) {
		path = i_ddi_strdup(paths[i], KM_SLEEP);
		paths[i] = path;
	}
	rw_exit(nvf_lock(dcfd_handle));

	if (ndevis == 0 && npaths == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		return (DDI_FAILURE);
	}

	ndevts_alloced = 128;
restart:
	ndevts = 0;
	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
	for (i = 0; i < ndevis; i++) {
		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
		ASSERT(!DEVI_IS_DETACHING(devis[i]));
		e_devid_minor_to_devlist(devis[i], minor_name,
		    ndevts_alloced, &ndevts, devts);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts, ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}
	for (i = 0; i < npaths; i++) {
		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
		devi = e_ddi_hold_devi_by_path(paths[i], 0);
		if (devi == NULL) {
			DEVID_LOG_STALE(("stale device reference",
			    devid, paths[i]));
			continue;
		}
		/*
		 * Verify the newly attached device registered a matching devid
		 */
		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
		    &match_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "%s: no devid registered on attach\n",
			    paths[i]));
			ddi_release_devi(devi);
			continue;
		}

		if (ddi_devid_compare(devid, match_devid) != 0) {
			DEVID_LOG_STALE(("new devid registered",
			    devid, paths[i]));
			ddi_release_devi(devi);
			ddi_devid_free(match_devid);
			continue;
		}
		ddi_devid_free(match_devid);

		e_devid_minor_to_devlist(devi, minor_name,
		    ndevts_alloced, &ndevts, devts);
		ddi_release_devi(devi);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts,
			    ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}

	/* drop hold from e_devid_cache_devi_path_lists */
	for (i = 0; i < ndevis; i++) {
		ndi_rele_devi(devis[i]);
	}
	for (i = 0; i < npaths; i++) {
		kmem_free(paths[i], strlen(paths[i]) + 1);
	}
	kmem_free(paths, nalloced * sizeof (char *));
	kmem_free(devis, nalloced * sizeof (dev_info_t *));

	if (ndevts == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
		return (DDI_FAILURE);
	}

	/*
	 * Build the final list of sorted dev_t's with duplicates collapsed so
	 * returned results are consistent. This prevents implementation
	 * artifacts from causing unnecessary changes in SVM namespace.
	 */
	/* bubble sort */
	for (i = 0; i < (ndevts - 1); i++) {
		for (j = 0; j < ((ndevts - 1) - i); j++) {
			if (devts[j + 1] < devts[j]) {
				tdevt = devts[j];
				devts[j] = devts[j + 1];
				devts[j + 1] = tdevt;
			}
		}
	}

	/* determine number of unique values */
	for (undevts = ndevts, i = 1; i < ndevts; i++) {
		if (devts[i - 1] == devts[i])
			undevts--;
	}

	/* allocate unique */
	udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);

	/* copy unique */
	udevts[0] = devts[0];
	for (i = 1, j = 1; i < ndevts; i++) {
		if (devts[i - 1] != devts[i])
			udevts[j++] = devts[i];
	}
	ASSERT(j == undevts);

	kmem_free(devts, ndevts_alloced * sizeof (dev_t));

	*retndevts = undevts;
	*retdevts = udevts;

	return (DDI_SUCCESS);
}

void
e_devid_cache_free_devt_list(int ndevts, dev_t *devt_list)
{
	kmem_free(devt_list, ndevts * sizeof (dev_t));
}
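
/*
 * Illustrative caller flow (a sketch, not taken from this file): layered
 * consumers typically reach this code via ddi_lyr_devid_to_devlist(),
 * which resolves a devid to a dev_t list and later frees it:
 *
 *	dev_t *devs;
 *	int ndevs;
 *
 *	if (e_devid_cache_to_devt_list(devid, DEVID_MINOR_NAME_ALL,
 *	    &ndevs, &devs) == DDI_SUCCESS) {
 *		... use devs[0..ndevs-1] ...
 *		e_devid_cache_free_devt_list(ndevs, devs);
 *	}
 */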

#ifdef	DEBUG
static void
devid_log(char *fmt, ddi_devid_t devid, char *path)
{
	char *devidstr = ddi_devid_str_encode(devid, NULL);
	if (path) {
		cmn_err(CE_CONT, "%s: %s %s\n", fmt, path, devidstr);
	} else {
		cmn_err(CE_CONT, "%s: %s\n", fmt, devidstr);
	}
	ddi_devid_str_free(devidstr);
}
#endif	/* DEBUG */
1003