xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c (revision d17be682a2c70b4505d43c830bbd2603da11918d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 /*
25  * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
26  * Copyright (c) 2016 by Delphix. All rights reserved.
27  * Copyright 2023 Oxide Computer Company
28  */
29 
30 /*
31  * Multiplexed I/O SCSI vHCI implementation
32  */
33 
34 #include <sys/conf.h>
35 #include <sys/file.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/scsi/scsi.h>
39 #include <sys/scsi/impl/scsi_reset_notify.h>
40 #include <sys/scsi/impl/services.h>
41 #include <sys/sunmdi.h>
42 #include <sys/mdi_impldefs.h>
43 #include <sys/scsi/adapters/scsi_vhci.h>
44 #include <sys/disp.h>
45 #include <sys/byteorder.h>
46 
47 extern uintptr_t scsi_callback_id;
48 extern ddi_dma_attr_t scsi_alloc_attr;
49 
50 #ifdef	DEBUG
51 int	vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
52 #endif
53 
54 /* number of retries for the vhci_do_prout command when NOT READY is returned */
55 int vhci_prout_not_ready_retry = 180;
56 
57 /*
58  * These values are defined to support the internal retry of
59  * SCSI packets for better sense code handling.
60  */
61 #define	VHCI_CMD_CMPLT	0
62 #define	VHCI_CMD_RETRY	1
63 #define	VHCI_CMD_ERROR	-1
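
/*
 * For illustration, a consumer of these values (such as vhci_do_prin(),
 * later in this file) loops on the retry code roughly as follows; this is
 * a sketch, and evaluate_sense_and_status() is a hypothetical stand-in
 * for the driver's sense-handling logic:
 *
 *	int rval;
 *	do {
 *		rval = evaluate_sense_and_status(vpkt);
 *	} while (rval == VHCI_CMD_RETRY);
 *	if (rval == VHCI_CMD_ERROR)
 *		(fail the command)
 */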
64 
65 #define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
66 #define	VHCI_SCSI_PERR		0x47
67 #define	VHCI_PGR_ILLEGALOP	-2
68 #define	VHCI_NUM_UPDATE_TASKQ	8
69 /* changed to 132 to accommodate HDS */
70 
71 /*
72  * Version Macros
73  */
74 #define	VHCI_NAME_VERSION	"SCSI VHCI Driver"
75 char		vhci_version_name[] = VHCI_NAME_VERSION;
76 
77 int		vhci_first_time = 0;
78 clock_t		vhci_to_ticks = 0;
79 int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
80 kcondvar_t	vhci_cv;
81 kmutex_t	vhci_global_mutex;
82 void		*vhci_softstate = NULL; /* for soft state */
83 
84 /*
85  * Delay (in microseconds) before retrying the reserve command
86  */
87 int		vhci_reserve_delay = 100000;
88 static int	vhci_path_quiesce_timeout = 60;
89 static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];
90 
91 /* uscsi delay (in microseconds) before retrying after a TRAN_BUSY */
92 static int vhci_uscsi_delay = 100000;
93 static int vhci_uscsi_retry_count = 180;
94 /* uscsi_restart_sense timeout id in case it needs to get canceled */
95 static timeout_id_t vhci_restart_timeid = 0;
96 
97 static int	vhci_bus_config_debug = 0;
98 
99 /*
100  * Bidirectional map of 'target-port' to port id <pid> for support of
101  * iostat(8) '-Xx' and '-Yx' output.
102  */
103 static kmutex_t		vhci_targetmap_mutex;
104 static uint_t		vhci_targetmap_pid = 1;
105 static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
106 static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */
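
/*
 * A minimal sketch of how the two hashes are kept in sync (illustrative
 * only; 'tport' and 'pid' are hypothetical locals, while mod_hash_find()
 * and mod_hash_insert() are the standard mod_hash interfaces):
 *
 *	mutex_enter(&vhci_targetmap_mutex);
 *	if (mod_hash_find(vhci_targetmap_byport,
 *	    (mod_hash_key_t)tport, (mod_hash_val_t *)&pid) != 0) {
 *		pid = vhci_targetmap_pid++;
 *		(void) mod_hash_insert(vhci_targetmap_byport,
 *		    (mod_hash_key_t)tport, (mod_hash_val_t)(uintptr_t)pid);
 *		(void) mod_hash_insert(vhci_targetmap_bypid,
 *		    (mod_hash_key_t)(uintptr_t)pid, (mod_hash_val_t)tport);
 *	}
 *	mutex_exit(&vhci_targetmap_mutex);
 */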
107 
108 /*
109  * functions exported by scsi_vhci struct cb_ops
110  */
111 static int vhci_open(dev_t *, int, int, cred_t *);
112 static int vhci_close(dev_t, int, int, cred_t *);
113 static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
114 
115 /*
116  * functions exported by scsi_vhci struct dev_ops
117  */
118 static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
119 static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
120 static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);
121 
122 /*
123  * functions exported by scsi_vhci scsi_hba_tran_t transport table
124  */
125 static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
126     scsi_hba_tran_t *, struct scsi_device *);
127 static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
128     struct scsi_device *);
129 static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
130 static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
131 static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
132 static int vhci_scsi_reset(struct scsi_address *, int);
133 static int vhci_scsi_reset_target(struct scsi_address *, int level,
134     uint8_t select_path);
135 static int vhci_scsi_reset_bus(struct scsi_address *);
136 static int vhci_scsi_getcap(struct scsi_address *, char *, int);
137 static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
138 static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
139 static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
140     mdi_pathinfo_t *pip);
141 static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
142     struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
143 static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
144 static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
145 static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
146 static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t),
147     caddr_t);
148 static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
149 static int vhci_scsi_get_name(struct scsi_device *, char *, int);
150 static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
151     void *, void *);
152 static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
153     void *, dev_info_t **);
154 static int vhci_scsi_bus_unconfig(dev_info_t *, uint_t, ddi_bus_config_op_t,
155     void *);
156 static struct scsi_failover_ops *vhci_dev_fo(dev_info_t *, struct scsi_device *,
157     void **, char **);
158 
159 /*
160  * functions registered with the mpxio framework via mdi_vhci_ops_t
161  */
162 static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
163 static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
164 static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
165     mdi_pathinfo_state_t, uint32_t, int);
166 static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
167 static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
168 static int vhci_failover(dev_info_t *, dev_info_t *, int);
169 static void vhci_client_attached(dev_info_t *);
170 static int vhci_is_dev_supported(dev_info_t *, dev_info_t *, void *);
171 
172 static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
173 static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
174 static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
175 static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
176 static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
177 static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
178 static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
179 static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
180 static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
181 static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
182 static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
183     int, caddr_t);
184 static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
185     uint_t, sv_iocdata_t *, int, caddr_t);
186 static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
187 static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
188     sv_iocdata_t *, int, caddr_t);
189 static int vhci_handle_ext_fo(struct scsi_pkt *, int);
190 static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
191 static int vhci_quiesce_lun(struct scsi_vhci_lun *);
192 static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
193 static void vhci_dispatch_scsi_start(void *);
194 static void vhci_efo_done(void *);
195 static void vhci_initiate_auto_failback(void *);
196 static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
197 static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
198     struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
199 static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
200 static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
201     scsi_vhci_lun_t *, char *, char *);
202 
203 static char *vhci_devnm_to_guid(char *);
204 static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
205     int, int (*func)(caddr_t));
206 static void vhci_intr(struct scsi_pkt *);
207 static int vhci_do_prout(scsi_vhci_priv_t *);
208 static void vhci_run_cmd(void *);
209 static int vhci_do_prin(struct vhci_pkt **);
210 static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
211 static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
212 static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
213 static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
214 static void vhci_lun_free(struct scsi_vhci_lun *dvlp, struct scsi_device *sd);
215 static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
216     uint8_t, uint8_t);
217 void vhci_update_pathstates(void *);
218 
219 #ifdef DEBUG
220 static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
221 static void vhci_print_cdb(dev_info_t *dip, uint_t level,
222     char *title, uchar_t *cdb);
223 static void vhci_clean_print(dev_info_t *dev, uint_t level,
224     char *title, uchar_t *data, int len);
225 #endif
226 static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
227 static void vhci_uscsi_iodone(struct scsi_pkt *pkt);
228 static void vhci_invalidate_mpapi_lu(struct scsi_vhci *, scsi_vhci_lun_t *);
229 
230 /*
231  * MP-API related functions
232  */
233 extern int vhci_mpapi_init(struct scsi_vhci *);
234 extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
235 extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
236 extern void vhci_update_mpapi_data(struct scsi_vhci *,
237     scsi_vhci_lun_t *, mdi_pathinfo_t *);
238 extern void* vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
239     uint8_t, void*);
240 extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
241 extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
242     scsi_vhci_lun_t *);
243 
244 #define	VHCI_DMA_MAX_XFER_CAP	INT_MAX
245 
246 #define	VHCI_MAX_PGR_RETRIES	3
247 
248 /*
249  * Macros for the device-type mpxio options
250  */
251 #define	LOAD_BALANCE_OPTIONS		"load-balance-options"
252 #define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
253 #define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
254 #define	DEVICE_TYPE_STR			"device-type"
255 #define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')
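
/*
 * These property names correspond to scsi_vhci.conf entries of roughly
 * the following shape (a sketch of the documented syntax; the
 * "VENDOR  PRODUCT" device-type string is illustrative):
 *
 *	device-type-mpxio-options-list =
 *	    "device-type=VENDOR  PRODUCT",
 *	    "load-balance-options=logical-block-options";
 *	logical-block-options = "load-balance=logical-block",
 *	    "region-size=18";
 */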
256 
257 static struct cb_ops vhci_cb_ops = {
258 	vhci_open,			/* open */
259 	vhci_close,			/* close */
260 	nodev,				/* strategy */
261 	nodev,				/* print */
262 	nodev,				/* dump */
263 	nodev,				/* read */
264 	nodev,				/* write */
265 	vhci_ioctl,			/* ioctl */
266 	nodev,				/* devmap */
267 	nodev,				/* mmap */
268 	nodev,				/* segmap */
269 	nochpoll,			/* chpoll */
270 	ddi_prop_op,			/* cb_prop_op */
271 	0,				/* streamtab */
272 	D_NEW | D_MP,			/* cb_flag */
273 	CB_REV,				/* rev */
274 	nodev,				/* aread */
275 	nodev				/* awrite */
276 };
277 
278 static struct dev_ops vhci_ops = {
279 	DEVO_REV,
280 	0,
281 	vhci_getinfo,
282 	nulldev,		/* identify */
283 	nulldev,		/* probe */
284 	vhci_attach,		/* attach and detach are mandatory */
285 	vhci_detach,
286 	nodev,			/* reset */
287 	&vhci_cb_ops,		/* cb_ops */
288 	NULL,			/* bus_ops */
289 	NULL,			/* power */
290 	ddi_quiesce_not_needed,	/* quiesce */
291 };
292 
293 extern struct mod_ops mod_driverops;
294 
295 static struct modldrv modldrv = {
296 	&mod_driverops,
297 	vhci_version_name,	/* module name */
298 	&vhci_ops
299 };
300 
301 static struct modlinkage modlinkage = {
302 	MODREV_1,
303 	&modldrv,
304 	NULL
305 };
306 
307 static mdi_vhci_ops_t vhci_opinfo = {
308 	MDI_VHCI_OPS_REV,
309 	vhci_pathinfo_init,		/* Pathinfo node init callback */
310 	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
311 	vhci_pathinfo_state_change,	/* Pathinfo node state change */
312 	vhci_failover,			/* failover callback */
313 	vhci_client_attached,		/* client attached callback	*/
314 	vhci_is_dev_supported		/* is device supported by mdi */
315 };
316 
317 /*
318  * The scsi_failover table defines an ordered set of 'fops' modules supported
319  * by scsi_vhci.  Currently, this table is initialized from the
320  * 'ddi-forceload' property specified in scsi_vhci.conf.
321  */
322 static struct scsi_failover {
323 	ddi_modhandle_t			sf_mod;
324 	struct scsi_failover_ops	*sf_sfo;
325 } *scsi_failover_table;
326 static uint_t	scsi_nfailover;
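
/*
 * A sketch of the scsi_vhci.conf property this table is built from; the
 * exact module list varies, but the stock file names the bundled
 * misc/scsi_vhci failover modules along these lines:
 *
 *	ddi-forceload =
 *	    "misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *	    "misc/scsi_vhci/scsi_vhci_f_sym",
 *	    "misc/scsi_vhci/scsi_vhci_f_tpgs";
 */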
327 
328 int
329 _init(void)
330 {
331 	int	rval;
332 
333 	/*
334 	 * Set up the soft state (so that ddi_soft_state_zalloc() can be
335 	 * done later) before registering with the transport.
336 	 */
337 	if ((rval = ddi_soft_state_init(&vhci_softstate,
338 	    sizeof (struct scsi_vhci), 1)) != 0) {
339 		VHCI_DEBUG(1, (CE_NOTE, NULL,
340 		    "!_init: soft state init failed\n"));
341 		return (rval);
342 	}
343 
344 	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
345 		VHCI_DEBUG(1, (CE_NOTE, NULL,
346 		    "!_init: scsi hba init failed\n"));
347 		ddi_soft_state_fini(&vhci_softstate);
348 		return (rval);
349 	}
350 
351 	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
352 	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);
353 
354 	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
355 	vhci_targetmap_byport = mod_hash_create_strhash(
356 	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
357 	vhci_targetmap_bypid = mod_hash_create_idhash(
358 	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);
359 
360 	if ((rval = mod_install(&modlinkage)) != 0) {
361 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
362 		if (vhci_targetmap_bypid)
363 			mod_hash_destroy_idhash(vhci_targetmap_bypid);
364 		if (vhci_targetmap_byport)
365 			mod_hash_destroy_strhash(vhci_targetmap_byport);
366 		mutex_destroy(&vhci_targetmap_mutex);
367 		cv_destroy(&vhci_cv);
368 		mutex_destroy(&vhci_global_mutex);
369 		scsi_hba_fini(&modlinkage);
370 		ddi_soft_state_fini(&vhci_softstate);
371 	}
372 	return (rval);
373 }
374 
375 
376 /*
377  * the system is done with us as a driver, so clean up
378  */
379 int
380 _fini(void)
381 {
382 	int rval;
383 
384 	/*
385 	 * don't start cleaning up until we know that the module remove
386 	 * has worked -- if it has, then we know that each instance
387 	 * has successfully been DDI_DETACHed
388 	 */
389 	if ((rval = mod_remove(&modlinkage)) != 0) {
390 		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
391 		return (rval);
392 	}
393 
394 	if (vhci_targetmap_bypid)
395 		mod_hash_destroy_idhash(vhci_targetmap_bypid);
396 	if (vhci_targetmap_byport)
397 		mod_hash_destroy_strhash(vhci_targetmap_byport);
398 	mutex_destroy(&vhci_targetmap_mutex);
399 	cv_destroy(&vhci_cv);
400 	mutex_destroy(&vhci_global_mutex);
401 	scsi_hba_fini(&modlinkage);
402 	ddi_soft_state_fini(&vhci_softstate);
403 
404 	return (rval);
405 }
406 
407 int
408 _info(struct modinfo *modinfop)
409 {
410 	return (mod_info(&modlinkage, modinfop));
411 }
412 
413 /*
414  * Lookup scsi_failover by "short name" of failover module.
415  */
416 struct scsi_failover_ops *
417 vhci_failover_ops_by_name(char *name)
418 {
419 	struct scsi_failover	*sf;
420 
421 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
422 		if (sf->sf_sfo == NULL)
423 			continue;
424 		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
425 			return (sf->sf_sfo);
426 	}
427 	return (NULL);
428 }
429 
430 /*
431  * Load all scsi_failover_ops 'fops' modules.
432  */
433 static void
434 vhci_failover_modopen(struct scsi_vhci *vhci)
435 {
436 	char			**module;
437 	int			i;
438 	struct scsi_failover	*sf;
439 	char			**dt;
440 	int			e;
441 
442 	if (scsi_failover_table)
443 		return;
444 
445 	/* Get the list of modules from scsi_vhci.conf */
446 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
447 	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
448 	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
449 		cmn_err(CE_WARN, "scsi_vhci: "
450 		    "scsi_vhci.conf is missing 'ddi-forceload'");
451 		return;
452 	}
453 	if (scsi_nfailover == 0) {
454 		cmn_err(CE_WARN, "scsi_vhci: "
455 		    "scsi_vhci.conf has empty 'ddi-forceload'");
456 		ddi_prop_free(module);
457 		return;
458 	}
459 
460 	/* allocate failover table based on number of modules */
461 	scsi_failover_table = (struct scsi_failover *)
462 	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
463 	    KM_SLEEP);
464 
465 	/* loop over modules specified in scsi_vhci.conf and open each module */
466 	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
467 		if (module[i] == NULL)
468 			continue;
469 
470 		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
471 		if (sf->sf_mod == NULL) {
472 			/*
473 			 * A module returns EEXIST if other software is
474 			 * supporting the intended function: for example
475 			 * the scsi_vhci_f_sum_emc module returns EEXIST
476 			 * from _init if EMC powerpath software is installed.
477 			 */
478 			if (e != EEXIST)
479 				cmn_err(CE_WARN, "scsi_vhci: unable to open "
480 				    "module '%s', error %d", module[i], e);
481 			continue;
482 		}
483 		sf->sf_sfo = ddi_modsym(sf->sf_mod,
484 		    "scsi_vhci_failover_ops", &e);
485 		if (sf->sf_sfo == NULL) {
486 			cmn_err(CE_WARN, "scsi_vhci: "
487 			    "unable to import 'scsi_failover_ops' from '%s', "
488 			    "error %d", module[i], e);
489 			(void) ddi_modclose(sf->sf_mod);
490 			sf->sf_mod = NULL;
491 			continue;
492 		}
493 
494 		/* register vid/pid of devices supported with mpapi */
495 		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
496 			vhci_mpapi_add_dev_prod(vhci, *dt);
497 		sf++;
498 	}
499 
500 	/* verify that at least the "well-known" modules were there */
501 	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
502 		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
503 		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
504 		    "'ddi-forceload'");
505 	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
506 		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
507 		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
508 		    "'ddi-forceload'");
509 
510 	/* call sfo_init for modules that need it */
511 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
512 		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
513 			sf->sf_sfo->sfo_init();
514 	}
515 
516 	ddi_prop_free(module);
517 }
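
/*
 * For reference, each fops module exports a 'scsi_vhci_failover_ops'
 * symbol.  Based only on the fields this function touches, a module
 * supplies roughly the following (a sketch; the names marked
 * hypothetical are not from this file, and <sys/scsi/adapters/scsi_vhci.h>
 * holds the authoritative struct scsi_failover_ops definition):
 *
 *	static char *my_devices[] = {		(hypothetical)
 *		"VENDOR  PRODUCT",		(vid/pid given to mpapi)
 *		NULL
 *	};
 *	struct scsi_failover_ops scsi_vhci_failover_ops = {
 *		.sfo_name = "f_example",	(hypothetical short name)
 *		.sfo_devices = my_devices,
 *		.sfo_init = my_init,		(optional; called above)
 *	};
 */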
518 
519 /*
520  * unload all loaded scsi_failover_ops modules
521  */
522 static void
523 vhci_failover_modclose()
524 {
525 	struct scsi_failover	*sf;
526 
527 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
528 		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
529 			continue;
530 		(void) ddi_modclose(sf->sf_mod);
531 		sf->sf_mod = NULL;
532 		sf->sf_sfo = NULL;
533 	}
534 
535 	if (scsi_failover_table && scsi_nfailover)
536 		kmem_free(scsi_failover_table,
537 		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
538 	scsi_failover_table = NULL;
539 	scsi_nfailover = 0;
540 }
541 
542 /* ARGSUSED */
543 static int
544 vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
545 {
546 	struct scsi_vhci	*vhci;
547 
548 	if (otype != OTYP_CHR) {
549 		return (EINVAL);
550 	}
551 
552 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
553 	if (vhci == NULL) {
554 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
555 		return (ENXIO);
556 	}
557 
558 	mutex_enter(&vhci->vhci_mutex);
559 	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
560 		mutex_exit(&vhci->vhci_mutex);
561 		vhci_log(CE_NOTE, vhci->vhci_dip,
562 		    "!vhci%d: Already open\n", getminor(*devp));
563 		return (EBUSY);
564 	}
565 
566 	vhci->vhci_state |= VHCI_STATE_OPEN;
567 	mutex_exit(&vhci->vhci_mutex);
568 	return (0);
569 }
570 
571 
572 /* ARGSUSED */
573 static int
574 vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
575 {
576 	struct scsi_vhci	*vhci;
577 
578 	if (otype != OTYP_CHR) {
579 		return (EINVAL);
580 	}
581 
582 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
583 	if (vhci == NULL) {
584 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
585 		return (ENXIO);
586 	}
587 
588 	mutex_enter(&vhci->vhci_mutex);
589 	vhci->vhci_state &= ~VHCI_STATE_OPEN;
590 	mutex_exit(&vhci->vhci_mutex);
591 
592 	return (0);
593 }
594 
595 /* ARGSUSED */
596 static int
597 vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
598     cred_t *credp, int *rval)
599 {
600 	if (IS_DEVCTL(cmd)) {
601 		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
602 	} else if (cmd == MP_CMD) {
603 		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
604 	} else {
605 		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
606 	}
607 }
608 
609 /*
610  * attach the module
611  */
612 static int
613 vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
614 {
615 	int			rval = DDI_FAILURE;
616 	int			scsi_hba_attached = 0;
617 	int			vhci_attached = 0;
618 	int			mutex_initted = 0;
619 	int			instance;
620 	struct scsi_vhci	*vhci;
621 	scsi_hba_tran_t		*tran;
622 	char			cache_name_buf[64];
623 	char			*data;
624 
625 	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));
626 
627 	instance = ddi_get_instance(dip);
628 
629 	switch (cmd) {
630 	case DDI_ATTACH:
631 		break;
632 
633 	case DDI_RESUME:
634 	case DDI_PM_RESUME:
635 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
636 		    "implemented\n"));
637 		return (rval);
638 
639 	default:
640 		VHCI_DEBUG(1, (CE_NOTE, NULL,
641 		    "!vhci_attach: unknown ddi command\n"));
642 		return (rval);
643 	}
644 
645 	/*
646 	 * Allocate vhci data structure.
647 	 */
648 	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
649 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
650 		    "soft state alloc failed\n"));
651 		return (DDI_FAILURE);
652 	}
653 
654 	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
655 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
656 		    "bad soft state\n"));
657 		ddi_soft_state_free(vhci_softstate, instance);
658 		return (DDI_FAILURE);
659 	}
660 
661 	/* Allocate packet cache */
662 	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
663 	    "vhci%d_cache", instance);
664 
665 	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
666 	mutex_initted++;
667 
668 	/*
669 	 * Allocate a transport structure
670 	 */
671 	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
672 	ASSERT(tran != NULL);
673 
674 	vhci->vhci_tran		= tran;
675 	vhci->vhci_dip		= dip;
676 	vhci->vhci_instance	= instance;
677 
678 	tran->tran_hba_private	= vhci;
679 	tran->tran_tgt_init	= vhci_scsi_tgt_init;
680 	tran->tran_tgt_probe	= NULL;
681 	tran->tran_tgt_free	= vhci_scsi_tgt_free;
682 
683 	tran->tran_start	= vhci_scsi_start;
684 	tran->tran_abort	= vhci_scsi_abort;
685 	tran->tran_reset	= vhci_scsi_reset;
686 	tran->tran_getcap	= vhci_scsi_getcap;
687 	tran->tran_setcap	= vhci_scsi_setcap;
688 	tran->tran_init_pkt	= vhci_scsi_init_pkt;
689 	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
690 	tran->tran_dmafree	= vhci_scsi_dmafree;
691 	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
692 	tran->tran_reset_notify = vhci_scsi_reset_notify;
693 
694 	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
695 	tran->tran_get_name	= vhci_scsi_get_name;
696 	tran->tran_bus_reset	= NULL;
697 	tran->tran_quiesce	= NULL;
698 	tran->tran_unquiesce	= NULL;
699 
700 	/*
701 	 * event notification routines are not registered with scsa
702 	 */
703 	tran->tran_get_eventcookie = NULL;
704 	tran->tran_add_eventcall = NULL;
705 	tran->tran_remove_eventcall = NULL;
706 	tran->tran_post_event	= NULL;
707 
708 	tran->tran_bus_power	= vhci_scsi_bus_power;
709 
710 	tran->tran_bus_config	= vhci_scsi_bus_config;
711 	tran->tran_bus_unconfig	= vhci_scsi_bus_unconfig;
712 
713 	/*
714 	 * Attach this instance with the mpxio framework
715 	 */
716 	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
717 	    != MDI_SUCCESS) {
718 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
719 		    "mdi_vhci_register failed\n"));
720 		goto attach_fail;
721 	}
722 	vhci_attached++;
723 
724 	/*
725 	 * Attach this instance of the hba.
726 	 *
727 	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
728 	 * driver, it has nothing to do with DMA. However, when calling
729 	 * scsi_hba_attach_setup() we need to pass something valid in the
730 	 * dma attributes parameter. So we just use scsi_alloc_attr.
731 	 * SCSA itself seems to care only about the dma_attr_minxfer and
732 	 * dma_attr_burstsizes fields of the dma attributes structure,
733 	 * and it expects those fields to be non-zero.
734 	 */
735 	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
736 	    SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) {
737 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
738 		    "hba attach failed\n"));
739 		goto attach_fail;
740 	}
741 	scsi_hba_attached++;
742 
743 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
744 	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
745 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
746 		    " ddi_create_minor_node failed\n"));
747 		goto attach_fail;
748 	}
749 
750 	/*
751 	 * Set pm-want-child-notification property for
752 	 * power management of the phci and client
753 	 */
754 	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
755 	    "pm-want-child-notification?", NULL, 0) != DDI_PROP_SUCCESS) {
756 		cmn_err(CE_WARN,
757 		    "%s%d failed to create pm-want-child-notification? prop",
758 		    ddi_driver_name(dip), ddi_get_instance(dip));
759 		goto attach_fail;
760 	}
761 
762 	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
763 	vhci->vhci_update_pathstates_taskq =
764 	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
765 	    MINCLSYSPRI, 1, 4, 0);
766 	ASSERT(vhci->vhci_taskq);
767 	ASSERT(vhci->vhci_update_pathstates_taskq);
768 
769 	/*
770 	 * Set appropriate configuration flags based on options set in
771 	 * conf file.
772 	 */
773 	vhci->vhci_conf_flags = 0;
774 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
775 	    "auto-failback", &data) == DDI_SUCCESS) {
776 		if (strcmp(data, "enable") == 0)
777 			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
778 		ddi_prop_free(data);
779 	}
780 
781 	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
782 		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
783 		    "disabled through scsi_vhci.conf file.");
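
	/*
	 * For reference, the corresponding scsi_vhci.conf entry is a
	 * one-liner (a sketch of the documented option syntax):
	 *
	 *	auto-failback="enable";
	 */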
784 
785 	/*
786 	 * Allocate an mpapi private structure
787 	 */
788 	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
789 	if (vhci_mpapi_init(vhci) != 0) {
790 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
791 		    "vhci_mpapi_init() failed"));
792 	}
793 
794 	vhci_failover_modopen(vhci);		/* load failover modules */
795 
796 	ddi_report_dev(dip);
797 	return (DDI_SUCCESS);
798 
799 attach_fail:
800 	if (vhci_attached)
801 		(void) mdi_vhci_unregister(dip, 0);
802 
803 	if (scsi_hba_attached)
804 		(void) scsi_hba_detach(dip);
805 
806 	if (vhci->vhci_tran)
807 		scsi_hba_tran_free(vhci->vhci_tran);
808 
809 	if (mutex_initted) {
810 		mutex_destroy(&vhci->vhci_mutex);
811 	}
812 
813 	ddi_soft_state_free(vhci_softstate, instance);
814 	return (DDI_FAILURE);
815 }
816 
817 
818 /*ARGSUSED*/
819 static int
820 vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
821 {
822 	int			instance = ddi_get_instance(dip);
823 	scsi_hba_tran_t		*tran;
824 	struct scsi_vhci	*vhci;
825 
826 	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));
827 
828 	if ((tran = ddi_get_driver_private(dip)) == NULL)
829 		return (DDI_FAILURE);
830 
831 	vhci = TRAN2HBAPRIVATE(tran);
832 	if (!vhci) {
833 		return (DDI_FAILURE);
834 	}
835 
836 	switch (cmd) {
837 	case DDI_DETACH:
838 		break;
839 
840 	case DDI_SUSPEND:
841 	case DDI_PM_SUSPEND:
842 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not yet "
843 		    "implemented\n"));
844 		return (DDI_FAILURE);
845 
846 	default:
847 		VHCI_DEBUG(1, (CE_NOTE, NULL,
848 		    "!vhci_detach: unknown ddi command\n"));
849 		return (DDI_FAILURE);
850 	}
851 
852 	(void) mdi_vhci_unregister(dip, 0);
853 	(void) scsi_hba_detach(dip);
854 	scsi_hba_tran_free(tran);
855 
856 	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
857 	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
858 		cmn_err(CE_WARN,
859 		    "%s%d unable to remove prop pm-want-child-notification?",
860 		    ddi_driver_name(dip), ddi_get_instance(dip));
861 	}
862 	if (vhci_restart_timeid != 0) {
863 		(void) untimeout(vhci_restart_timeid);
864 	}
865 	vhci_restart_timeid = 0;
866 
867 	mutex_destroy(&vhci->vhci_mutex);
868 	vhci->vhci_dip = NULL;
869 	vhci->vhci_tran = NULL;
870 	taskq_destroy(vhci->vhci_taskq);
871 	taskq_destroy(vhci->vhci_update_pathstates_taskq);
872 	ddi_remove_minor_node(dip, NULL);
873 	ddi_soft_state_free(vhci_softstate, instance);
874 
875 	vhci_failover_modclose();		/* unload failover modules */
876 	return (DDI_SUCCESS);
877 }
878 
879 /*
880  * vhci_getinfo()
881  * Given the device number, return the devinfo pointer or the
882  * instance number.
883  * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
884  */
885 
886 /*ARGSUSED*/
887 static int
888 vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
889 {
890 	struct scsi_vhci	*vhcip;
891 	int			instance = MINOR2INST(getminor((dev_t)arg));
892 
893 	switch (cmd) {
894 	case DDI_INFO_DEVT2DEVINFO:
895 		vhcip = ddi_get_soft_state(vhci_softstate, instance);
896 		if (vhcip != NULL)
897 			*result = vhcip->vhci_dip;
898 		else {
899 			*result = NULL;
900 			return (DDI_FAILURE);
901 		}
902 		break;
903 
904 	case DDI_INFO_DEVT2INSTANCE:
905 		*result = (void *)(uintptr_t)instance;
906 		break;
907 
908 	default:
909 		return (DDI_FAILURE);
910 	}
911 
912 	return (DDI_SUCCESS);
913 }
914 
915 /*ARGSUSED*/
916 static int
917 vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
918     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
919 {
920 	char			*guid;
921 	scsi_vhci_lun_t		*vlun;
922 	struct scsi_vhci	*vhci;
923 	clock_t			from_ticks;
924 	mdi_pathinfo_t		*pip;
925 	int			rval;
926 
927 	ASSERT(hba_dip != NULL);
928 	ASSERT(tgt_dip != NULL);
929 
930 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
931 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
932 		/*
933 		 * This must be the .conf node without a GUID property.
934 		 * The node under fp already inserts a delay, so we
935 		 * just return from here. We rely on this delay to
936 		 * ensure all dips are posted to the ndi hotplug
937 		 * thread's newdev list. This is necessary for the
938 		 * deferred attach mechanism to work and for opens done
939 		 * soon after boot to succeed.
940 		 */
941 		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
942 		    "property failed"));
943 		return (DDI_NOT_WELL_FORMED);
944 	}
945 
946 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
947 		/*
948 		 * This must be a .conf node with the GUID property. We don't
949 		 * merge properties by ndi_merge_node() here because the
950 		 * devi_addr_buf of a .conf node is always "" according to the
951 		 * implementation of vhci_scsi_get_name_bus_addr().
952 		 */
953 		ddi_set_name_addr(tgt_dip, NULL);
954 		return (DDI_FAILURE);
955 	}
956 
957 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
958 	ASSERT(vhci != NULL);
959 
960 	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
961 	    "!tgt_init: called for %s (instance %d)\n",
962 	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));
963 
964 	vlun = vhci_lun_lookup(tgt_dip);
965 
966 	mutex_enter(&vhci_global_mutex);
967 
968 	from_ticks = ddi_get_lbolt();
969 	if (vhci_to_ticks == 0) {
970 		vhci_to_ticks = from_ticks +
971 		    drv_usectohz(vhci_init_wait_timeout);
972 	}
973 
974 #if DEBUG
975 	if (vlun) {
976 		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
977 		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
978 		    "from_ticks %lx to_ticks %lx",
979 		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
980 	} else {
981 		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
982 		    "vhci_scsi_tgt_init: guid %s : vlun not found "
983 		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
984 		    vhci_to_ticks));
985 	}
986 #endif
987 
988 	rval = mdi_select_path(tgt_dip, NULL,
989 	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
990 	if (rval == MDI_SUCCESS) {
991 		mdi_rele_path(pip);
992 	}
993 
994 	/*
995 	 * Wait while the following conditions hold:
996 	 *	1. no vlun is available yet
997 	 *	2. no path has been established
998 	 *	3. the timer has not expired
999 	 */
1000 	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
1001 	    (rval != MDI_SUCCESS)) {
1002 		if (vlun && vlun->svl_not_supported) {
1003 			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
1004 			    "vlun 0x%p lun guid %s not supported!",
1005 			    (void *)vlun, guid));
1006 			mutex_exit(&vhci_global_mutex);
1007 			ddi_prop_free(guid);
1008 			return (DDI_NOT_WELL_FORMED);
1009 		}
1010 		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
1011 			vhci_first_time = 1;
1012 		}
1013 		if (vhci_first_time == 1) {
1014 			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
1015 			    "no wait for %s. from_tick %lx, to_tick %lx",
1016 			    guid, from_ticks, vhci_to_ticks));
1017 			mutex_exit(&vhci_global_mutex);
1018 			ddi_prop_free(guid);
1019 			return (DDI_NOT_WELL_FORMED);
1020 		}
1021 
1022 		if (cv_timedwait(&vhci_cv,
1023 		    &vhci_global_mutex, vhci_to_ticks) == -1) {
1024 			/* Timed out */
1025 #ifdef DEBUG
1026 			if (vlun == NULL) {
1027 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1028 				    "tgt_init: no vlun for %s!", guid));
1029 			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
1030 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1031 				    "tgt_init: client path count is "
1032 				    "zero for %s!", guid));
1033 			} else {
1034 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1035 				    "tgt_init: client path not "
1036 				    "available yet for %s!", guid));
1037 			}
1038 #endif /* DEBUG */
1039 			mutex_exit(&vhci_global_mutex);
1040 			ddi_prop_free(guid);
1041 			return (DDI_NOT_WELL_FORMED);
1042 		}
1043 		vlun = vhci_lun_lookup(tgt_dip);
1044 		rval = mdi_select_path(tgt_dip, NULL,
1045 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
1046 		    NULL, &pip);
1047 		if (rval == MDI_SUCCESS) {
1048 			mdi_rele_path(pip);
1049 		}
1050 		from_ticks = ddi_get_lbolt();
1051 	}
1052 	mutex_exit(&vhci_global_mutex);
1053 
1054 	ASSERT(vlun != NULL);
1055 	ddi_prop_free(guid);
1056 
1057 	scsi_device_hba_private_set(sd, vlun);
1058 
1059 	return (DDI_SUCCESS);
1060 }
1061 
1062 /*ARGSUSED*/
1063 static void
1064 vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1065     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1066 {
1067 	struct scsi_vhci_lun *dvlp;
1068 	ASSERT(mdi_client_get_path_count(tgt_dip) <= 0);
1069 	dvlp = (struct scsi_vhci_lun *)scsi_device_hba_private_get(sd);
1070 	ASSERT(dvlp != NULL);
1071 
1072 	vhci_lun_free(dvlp, sd);
1073 }
1074 
1075 /*
1076  * a PGR register command has started; copy the info we need
1077  */
1078 int
1079 vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
1080 {
1081 	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
1082 	void			*addr;
1083 
1084 	if (!vpkt->vpkt_tgt_init_bp)
1085 		return (TRAN_BADPKT);
1086 
1087 	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
1088 	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
1089 	if (addr == NULL)
1090 		return (TRAN_BUSY);
1091 
1092 	mutex_enter(&vlun->svl_mutex);
1093 
1094 	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");
1095 
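	/*
	 * The size arithmetic below copies the caller-supplied parameter
	 * list but deliberately stops short of the two trailing
	 * MHIOC_RESV_KEY_SIZE key fields of vhci_prout_t (the "active"
	 * keys, per its definition in scsi_vhci.h), which must survive
	 * the register operation.
	 */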
1096 	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
1097 	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
1098 	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));
1099 
1100 	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");
1101 
1102 	vlun->svl_time = pkt->pkt_time;
1103 	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
1104 	vlun->svl_first_path = vpkt->vpkt_path;
1105 	mutex_exit(&vlun->svl_mutex);
1106 	return (0);
1107 }
1108 
1109 /*
1110  * Function name : vhci_scsi_start()
1111  *
1112  * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
1113  *					  or other fatal failure
1114  *					  preventing packet transportation
1115  *		   TRAN_BUSY		- request queue is full
1116  *		   TRAN_ACCEPT		- pkt has been submitted to phci
1117  *					  (or is held in the waitQ)
1118  * Description	 : Implements SCSA's tran_start() entry point for
1119  *		   packet transport
1120  *
1121  */
1122 static int
1123 vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1124 {
1125 	int			rval = TRAN_ACCEPT;
1126 	int			instance, held;
1127 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
1128 	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
1129 	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
1130 	int			flags = 0;
1131 	scsi_vhci_priv_t	*svp, *svp_resrv;
1132 	dev_info_t		*cdip;
1133 	client_lb_t		lbp;
1134 	int			restore_lbp = 0;
1135 	/* set if pkt is SCSI-II RESERVE cmd */
1136 	int			pkt_reserve_cmd = 0;
1137 	int			reserve_failed = 0;
1138 	int			resrv_instance = 0;
1139 	mdi_pathinfo_t		*pip;
1140 	struct scsi_pkt		*rel_pkt;
1141 
1142 	ASSERT(vhci != NULL);
1143 	ASSERT(vpkt != NULL);
1144 	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
1145 	cdip = ADDR2DIP(ap);
1146 
1147 	/*
1148 	 * Block IOs if LUN is held or QUIESCED for IOs.
1149 	 */
1150 	if ((VHCI_LUN_IS_HELD(vlun)) ||
1151 	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
1152 		return (TRAN_BUSY);
1153 	}
1154 
1155 	/*
1156 	 * The vhci_lun needs to be quiesced before a SCSI-II RESERVE
1157 	 * command can be issued.  This may require a cv_timedwait, which
1158 	 * is dangerous to perform in an interrupt context.  So if this
1159 	 * is a RESERVE command, a taskq is dispatched to service it.
1160 	 * That taskq calls vhci_scsi_start again, but this time it is
1161 	 * guaranteed not to be in an interrupt context.
1162 	 */
1163 	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
1164 	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
1165 		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
1166 			if (taskq_dispatch(vhci->vhci_taskq,
1167 			    vhci_dispatch_scsi_start, (void *) vpkt,
1168 			    KM_NOSLEEP) != TASKQID_INVALID) {
1169 				return (TRAN_ACCEPT);
1170 			} else {
1171 				return (TRAN_BUSY);
1172 			}
1173 		}
1174 
1175 		/*
1176 		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
1177 		 * get serviced for a lun.
1178 		 */
1179 		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
1180 		if (!held) {
1181 			return (TRAN_BUSY);
1182 		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
1183 		    VLUN_QUIESCED_FLG) {
1184 			VHCI_RELEASE_LUN(vlun);
1185 			return (TRAN_BUSY);
1186 		}
1187 
1188 		/*
1189 		 * To ensure that no IOs occur for this LUN for the duration
1190 		 * of this pkt, set the VLUN_QUIESCED_FLG.
1191 		 * In case this routine needs to exit on error, make sure
1192 		 * that this flag is cleared.
1193 		 */
1194 		vlun->svl_flags |= VLUN_QUIESCED_FLG;
1195 		pkt_reserve_cmd = 1;
1196 
1197 		/*
1198 		 * if this is a SCSI-II RESERVE command, set load balancing
1199 		 * policy to be ALTERNATE PATH to ensure that all subsequent
1200 		 * IOs are routed on the same path.  This is because if commands
1201 		 * are routed across multiple paths, then IOs on paths other than
1202 		 * the one on which the RESERVE was executed will get a
1203 		 * RESERVATION CONFLICT.
1204 		 */
1205 		lbp = mdi_get_lb_policy(cdip);
1206 		if (lbp != LOAD_BALANCE_NONE) {
1207 			if (vhci_quiesce_lun(vlun) != 1) {
1208 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1209 				VHCI_RELEASE_LUN(vlun);
1210 				return (TRAN_FATAL_ERROR);
1211 			}
1212 			vlun->svl_lb_policy_save = lbp;
1213 			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
1214 			    MDI_SUCCESS) {
1215 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1216 				VHCI_RELEASE_LUN(vlun);
1217 				return (TRAN_FATAL_ERROR);
1218 			}
1219 			restore_lbp = 1;
1220 		}
1221 
1222 		VHCI_DEBUG(2, (CE_NOTE, vhci->vhci_dip,
1223 		    "!vhci_scsi_start: sending SCSI-2 RESERVE, vlun 0x%p, "
1224 		    "svl_resrv_pip 0x%p, svl_flags: %x, lb_policy %x",
1225 		    (void *)vlun, (void *)vlun->svl_resrv_pip, vlun->svl_flags,
1226 		    mdi_get_lb_policy(cdip)));
1227 
1228 		/*
1229 		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h.
1230 		 * To narrow the window where a reserve command may be sent
1231 		 * down an inactive path, the path states first need to be
1232 		 * updated.  Before calling vhci_update_pathstates, reset
1233 		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
1234 		 * for this lun.  This prevents an unnecessary reset
1235 		 * from being sent out.  Also remember the currently reserved
1236 		 * path, in case the new reservation goes to another path.
1237 		 */
1238 		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
1239 			resrv_instance = mdi_pi_get_path_instance(
1240 			    vlun->svl_resrv_pip);
1241 		}
1242 		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
1243 		vhci_update_pathstates((void *)vlun);
1244 	}
1245 
1246 	instance = ddi_get_instance(vhci->vhci_dip);
1247 
1248 	/*
1249 	 * If the command is PRIN with action of zero, then the cmd
1250 	 * is reading PR keys which requires filtering on completion.
1251 	 * Data cache sync must be guaranteed.
1252 	 */
1253 	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
1254 	    (vpkt->vpkt_org_vpkt == NULL)) {
1255 		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
1256 	}
1257 
1258 	/*
1259 	 * Do not defer bind for PKT_DMA_PARTIAL
1260 	 */
1261 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1262 
1263 		/* This is a non pkt_dma_partial case */
1264 		if ((rval = vhci_bind_transport(
1265 		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
1266 		    != TRAN_ACCEPT) {
1267 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1268 			    "!vhci%d %x: failed to bind transport: "
1269 			    "vlun 0x%p pkt_reserved %x restore_lbp %x,"
1270 			    "lbp %x", instance, rval, (void *)vlun,
1271 			    pkt_reserve_cmd, restore_lbp, lbp));
1272 			if (restore_lbp)
1273 				(void) mdi_set_lb_policy(cdip, lbp);
1274 			if (pkt_reserve_cmd)
1275 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1276 			return (rval);
1277 		}
1278 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1279 		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
1280 	}
1281 	ASSERT(vpkt->vpkt_hba_pkt != NULL);
1282 	ASSERT(vpkt->vpkt_path != NULL);
1283 
1284 	/*
1285 	 * This is the chance to adjust the pHCI's pkt and other information
1286 	 * from target driver's pkt.
1287 	 */
1288 	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
1289 	    (void *)vpkt));
1290 	vhci_update_pHCI_pkt(vpkt, pkt);
1291 
1292 	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
1293 		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
1294 			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1295 			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
1296 			    "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
1297 			    (void *)vlun, (void *)vpkt->vpkt_path,
1298 			    (void *)vlun->svl_resrv_pip,
1299 			    mdi_get_lb_policy(cdip)));
1300 			reserve_failed = 1;
1301 		}
1302 	}
1303 
1304 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
1305 	if (svp == NULL || reserve_failed) {
1306 		if (pkt_reserve_cmd) {
1307 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1308 			    "!vhci_bind returned null svp vlun 0x%p",
1309 			    (void *)vlun));
1310 			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1311 			if (restore_lbp)
1312 				(void) mdi_set_lb_policy(cdip, lbp);
1313 		}
1314 pkt_cleanup:
1315 		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1316 			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1317 			vpkt->vpkt_hba_pkt = NULL;
1318 			if (vpkt->vpkt_path) {
1319 				mdi_rele_path(vpkt->vpkt_path);
1320 				vpkt->vpkt_path = NULL;
1321 			}
1322 		}
1323 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1324 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1325 		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1326 			sema_v(&vlun->svl_pgr_sema);
1327 		}
1328 		return (TRAN_BUSY);
1329 	}
1330 
1331 	if ((resrv_instance != 0) && (resrv_instance !=
1332 	    mdi_pi_get_path_instance(vpkt->vpkt_path))) {
1333 		/*
1334 		 * previously reserved path referred to by resrv_instance might
1335 		 * previously reserved path referred by resrv_instance might
1336 		 * still be reserved.  Hence we will send a release command
1337 		 * there in order to avoid a reservation conflict.
1338 		 */
1339 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, "!vhci_scsi_start: "
1340 		    "conflicting reservation on another path, vlun 0x%p, "
1341 		    "reserved instance %d, new instance: %d, pip: 0x%p",
1342 		    (void *)vlun, resrv_instance,
1343 		    mdi_pi_get_path_instance(vpkt->vpkt_path),
1344 		    (void *)vpkt->vpkt_path));
1345 
1346 		/*
1347 		 * In rare cases, the path referred by resrv_instance could
1348 		 * disappear in the meantime. Calling mdi_select_path() below
1349 		 * is an attempt to find out if the path still exists. It also
1350 		 * ensures that the path will be held when the release is sent.
1351 		 */
1352 		rval = mdi_select_path(cdip, NULL, MDI_SELECT_PATH_INSTANCE,
1353 		    (void *)(intptr_t)resrv_instance, &pip);
1354 
1355 		if ((rval == MDI_SUCCESS) && (pip != NULL)) {
1356 			svp_resrv = (scsi_vhci_priv_t *)
1357 			    mdi_pi_get_vhci_private(pip);
1358 			rel_pkt = scsi_init_pkt(&svp_resrv->svp_psd->sd_address,
1359 			    NULL, NULL, CDB_GROUP0,
1360 			    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC,
1361 			    NULL);
1362 
1363 			if (rel_pkt == NULL) {
1364 				char	*p_path;
1365 
1366 				/*
1367 				 * This is very unlikely.
1368 				 * scsi_init_pkt(SLEEP_FUNC) does not fail
1369 				 * because of resources. But in theory it could
1370 				 * fail for some other reason. There is no
1371 				 * easy way to recover, though. Log a warning
1372 				 * and return.
1373 				 */
1374 				p_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1375 				vhci_log(CE_WARN, vhci->vhci_dip, "!Sending "
1376 				    "RELEASE(6) to %s failed, a potential "
1377 				    "reservation conflict ahead.",
1378 				    ddi_pathname(mdi_pi_get_phci(pip), p_path));
1379 				kmem_free(p_path, MAXPATHLEN);
1380 
1381 				if (restore_lbp)
1382 					(void) mdi_set_lb_policy(cdip, lbp);
1383 
1384 				/* no need to check pkt_reserve_cmd here */
1385 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1386 				return (TRAN_FATAL_ERROR);
1387 			}
1388 
1389 			rel_pkt->pkt_cdbp[0] = SCMD_RELEASE;
1390 			rel_pkt->pkt_time = 60;
1391 
1392 			/*
1393 			 * Ignore the return value.  If it fails, then
1394 			 * most likely the path is no longer reserved
1395 			 * anyway.
1396 			 */
1397 			(void) vhci_do_scsi_cmd(rel_pkt);
1398 			VHCI_DEBUG(1, (CE_NOTE, NULL,
1399 			    "!vhci_scsi_start: path 0x%p, issued SCSI-2"
1400 			    " RELEASE\n", (void *)pip));
1401 			scsi_destroy_pkt(rel_pkt);
1402 			mdi_rele_path(pip);
1403 		}
1404 	}
1405 
1406 	VHCI_INCR_PATH_CMDCOUNT(svp);
1407 
1408 	/*
1409 	 * Ensure that no other IOs raced ahead while a RESERVE cmd was
1410 	 * QUIESCING the same lun.
1411 	 */
1412 	if ((!pkt_reserve_cmd) &&
1413 	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
1414 		VHCI_DECR_PATH_CMDCOUNT(svp);
1415 		goto pkt_cleanup;
1416 	}
1417 
1418 	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
1419 	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
1420 		/*
1421 		 * the taskq created below currently only handles running PGR
1422 		 * commands, so don't bother creating it unless
1423 		 * something interesting is going to happen (like
1424 		 * either a PGR out, or a PGR in with enough space
1425 		 * to hold the keys that are getting returned)
1426 		 */
1427 		mutex_enter(&vlun->svl_mutex);
1428 		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
1429 		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
1430 			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
1431 			    1, MINCLSYSPRI, 1, 4, 0);
1432 			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
1433 		}
1434 		mutex_exit(&vlun->svl_mutex);
1435 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1436 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1437 		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1438 			if ((rval = vhci_pgr_register_start(vlun, pkt)) != 0) {
1439 				/* an error */
1440 				sema_v(&vlun->svl_pgr_sema);
1441 				return (rval);
1442 			}
1443 		}
1444 	}
1445 
1446 	/*
1447 	 * SCSI-II RESERVE cmd is not expected in polled mode.
1448 	 * If this changes, it needs to be handled for the polled scenario.
1449 	 */
1450 	flags = vpkt->vpkt_hba_pkt->pkt_flags;
1451 
1452 	/*
1453 	 * Set the path_instance *before* sending the scsi_pkt down the path
1454 	 * to mpxio's pHCI so that additional path abstractions at a pHCI
1455 	 * level (like maybe iSCSI at some point in the future) can update
1456 	 * the path_instance.
1457 	 */
1458 	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
1459 		vpkt->vpkt_hba_pkt->pkt_path_instance =
1460 		    mdi_pi_get_path_instance(vpkt->vpkt_path);
1461 
1462 	rval = scsi_transport(vpkt->vpkt_hba_pkt);
1463 	if (rval == TRAN_ACCEPT) {
1464 		if (flags & FLAG_NOINTR) {
1465 			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
1466 			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;
1467 
1468 			ASSERT(tpkt != NULL);
1469 			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
1470 			tpkt->pkt_resid = pkt->pkt_resid;
1471 			tpkt->pkt_state = pkt->pkt_state;
1472 			tpkt->pkt_statistics = pkt->pkt_statistics;
1473 			tpkt->pkt_reason = pkt->pkt_reason;
1474 
1475 			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
1476 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
1477 				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
1478 				    vpkt->vpkt_tgt_init_scblen);
1479 			}
1480 
1481 			VHCI_DECR_PATH_CMDCOUNT(svp);
1482 			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1483 				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1484 				vpkt->vpkt_hba_pkt = NULL;
1485 				if (vpkt->vpkt_path) {
1486 					mdi_rele_path(vpkt->vpkt_path);
1487 					vpkt->vpkt_path = NULL;
1488 				}
1489 			}
1490 			/*
1491 			 * This path will not automatically retry pkts
1492 			 * internally, therefore, vpkt_org_vpkt should
1493 			 * never be set.
1494 			 */
1495 			ASSERT(vpkt->vpkt_org_vpkt == NULL);
1496 			scsi_hba_pkt_comp(tpkt);
1497 		}
1498 		return (rval);
1499 	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1500 	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1501 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1502 		/* the command exited with bad status */
1503 		sema_v(&vlun->svl_pgr_sema);
1504 	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
1505 		/* the command exited with bad status */
1506 		sema_v(&vlun->svl_pgr_sema);
1507 	} else if (pkt_reserve_cmd) {
1508 		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1509 		    "!vhci_scsi_start: reserve failed vlun 0x%p",
1510 		    (void *)vlun));
1511 		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1512 		if (restore_lbp)
1513 			(void) mdi_set_lb_policy(cdip, lbp);
1514 	}
1515 
1516 	ASSERT(vpkt->vpkt_hba_pkt != NULL);
1517 	VHCI_DECR_PATH_CMDCOUNT(svp);
1518 
1519 	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
1520 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1521 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1522 		vpkt->vpkt_hba_pkt = NULL;
1523 		if (vpkt->vpkt_path) {
1524 			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
1525 			mdi_rele_path(vpkt->vpkt_path);
1526 			vpkt->vpkt_path = NULL;
1527 		}
1528 	}
1529 	return (TRAN_BUSY);
1530 }
1531 
1532 /*
1533  * Function name : vhci_scsi_reset()
1534  *
1535  * Return Values : 0 - reset failed
1536  *		   1 - reset succeeded
1537  */
1538 
1539 /* ARGSUSED */
1540 static int
1541 vhci_scsi_reset(struct scsi_address *ap, int level)
1542 {
1543 	int rval = 0;
1544 
1545 	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
1546 	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
1547 		return (vhci_scsi_reset_target(ap, level, TRUE));
1548 	} else if (level == RESET_ALL) {
1549 		return (vhci_scsi_reset_bus(ap));
1550 	}
1551 
1552 	return (rval);
1553 }
1554 
1555 /*
1556  * vhci_recovery_reset:
1557  *	Issues reset to the device
1558  * Input:
1559  *	vlun - vhci lun pointer of the device
1560  *	ap - address of the device
1561  *	select_path:
1562  *		If select_path is FALSE, then the address specified in ap is
1563  *		the path on which reset will be issued.
1564  *		If select_path is TRUE, then path is obtained by calling
1565  *		mdi_select_path.
1566  *
1567  *	recovery_depth:
1568  *		Caller can specify the level of reset.
1569  *		VHCI_DEPTH_LUN -
1570  *			Issues LUN RESET if the device supports lun reset.
1571  *		VHCI_DEPTH_TARGET -
1572  *			If LUN RESET fails or the device does not support
1573  *			lun reset, issues TARGET RESET.
1574  *		VHCI_DEPTH_ALL -
1575  *			If LUN RESET fails or the device does not support
1576  *			lun reset, issues TARGET RESET.
1577  *			If TARGET RESET does not succeed, issues bus reset.
1578  */
1579 
1580 static int
1581 vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
1582     uint8_t select_path, uint8_t recovery_depth)
1583 {
1584 	int	ret = 0;
1585 
1586 	ASSERT(ap != NULL);
1587 
1588 	if (vlun && vlun->svl_support_lun_reset == 1) {
1589 		ret = vhci_scsi_reset_target(ap, RESET_LUN,
1590 		    select_path);
1591 	}
1592 
1593 	recovery_depth--;
1594 
1595 	if ((ret == 0) && recovery_depth) {
1596 		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
1597 		    select_path);
1598 		recovery_depth--;
1599 	}
1600 
1601 	if ((ret == 0) && recovery_depth) {
1602 		(void) scsi_reset(ap, RESET_ALL);
1603 	}
1604 
1605 	return (ret);
1606 }
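
/*
 * A sketch of a typical invocation (illustrative; the error-recovery
 * paths elsewhere in this driver call it along these lines):
 *
 *	(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
 *	    TRUE, VHCI_DEPTH_TARGET);
 */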
1607 
1608 /*
1609  * Note: The scsi_address passed to this routine could be the scsi_address
1610  * for the virtual device or the physical device. No assumptions should be
1611  * made in this routine about the contents of the ap structure.
1612  * Further, note that the child dip would be the dip of the ssd node regardless
1613  * of the scsi_address passed in.
1614  */
1615 static int
1616 vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
1617 {
1618 	dev_info_t		*vdip, *cdip;
1619 	mdi_pathinfo_t		*pip = NULL;
1620 	mdi_pathinfo_t		*npip = NULL;
1621 	int			rval = -1;
1622 	scsi_vhci_priv_t	*svp = NULL;
1623 	struct scsi_address	*pap = NULL;
1624 	scsi_hba_tran_t		*hba = NULL;
1625 	int			sps;
1626 	struct scsi_vhci	*vhci = NULL;
1627 
1628 	if (select_path != TRUE) {
1629 		ASSERT(ap != NULL);
1630 		if (level == RESET_LUN) {
1631 			hba = ap->a_hba_tran;
1632 			ASSERT(hba != NULL);
1633 			return (hba->tran_reset(ap, RESET_LUN));
1634 		}
1635 		return (scsi_reset(ap, level));
1636 	}
1637 
1638 	cdip = ADDR2DIP(ap);
1639 	ASSERT(cdip != NULL);
1640 	vdip = ddi_get_parent(cdip);
1641 	ASSERT(vdip != NULL);
1642 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
1643 	ASSERT(vhci != NULL);
1644 
1645 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
1646 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
1647 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1648 		    "Unable to get a path, dip 0x%p", (void *)cdip));
1649 		return (0);
1650 	}
1651 again:
1652 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
1653 	if (svp == NULL) {
1654 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1655 		    "priv is NULL, pip 0x%p", (void *)pip));
1656 		mdi_rele_path(pip);
1657 		return (0);
1658 	}
1659 
1660 	if (svp->svp_psd == NULL) {
1661 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1662 		    "psd is NULL, pip 0x%p, svp 0x%p",
1663 		    (void *)pip, (void *)svp));
1664 		mdi_rele_path(pip);
1665 		return (0);
1666 	}
1667 
1668 	pap = &svp->svp_psd->sd_address;
1669 	hba = pap->a_hba_tran;
1670 
1671 	ASSERT(pap != NULL);
1672 	ASSERT(hba != NULL);
1673 
1674 	if (hba->tran_reset != NULL) {
1675 		if (hba->tran_reset(pap, level) == 0) {
1676 			vhci_log(CE_WARN, vdip, "!%s%d: "
1677 			    "path %s, reset %d failed",
1678 			    ddi_driver_name(cdip), ddi_get_instance(cdip),
1679 			    mdi_pi_spathname(pip), level);
1680 
1681 			/*
1682 			 * Select next path and issue the reset, repeat
1683 			 * until all paths are exhausted
1684 			 */
1685 			sps = mdi_select_path(cdip, NULL,
1686 			    MDI_SELECT_ONLINE_PATH, pip, &npip);
1687 			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
1688 				mdi_rele_path(pip);
1689 				return (0);
1690 			}
1691 			mdi_rele_path(pip);
1692 			pip = npip;
1693 			goto again;
1694 		}
1695 		mdi_rele_path(pip);
1696 		mutex_enter(&vhci->vhci_mutex);
1697 		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
1698 		    &vhci->vhci_reset_notify_listf);
1699 		mutex_exit(&vhci->vhci_mutex);
1700 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
1701 		    "reset %d sent down pip:%p for cdip:%p\n", level,
1702 		    (void *)pip, (void *)cdip));
1703 		return (1);
1704 	}
1705 	mdi_rele_path(pip);
1706 	return (0);
1707 }
1708 
1709 
1710 /* ARGSUSED */
1711 static int
1712 vhci_scsi_reset_bus(struct scsi_address *ap)
1713 {
1714 	return (1);
1715 }
1716 
1717 
1718 /*
1719  * called by vhci_getcap and vhci_setcap to get and set (respectively)
1720  * SCSI capabilities
1721  */
1722 /* ARGSUSED */
1723 static int
1724 vhci_commoncap(struct scsi_address *ap, char *cap,
1725     int val, int tgtonly, int doset)
1726 {
1727 	struct scsi_vhci		*vhci = ADDR2VHCI(ap);
1728 	struct scsi_vhci_lun		*vlun = ADDR2VLUN(ap);
1729 	int			cidx;
1730 	int			rval = 0;
1731 
1732 	if (cap == NULL) {
1733 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1734 		    "!vhci_commoncap: invalid arg"));
1735 		return (rval);
1736 	}
1737 
1738 	if (vlun == NULL) {
1739 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1740 		    "!vhci_commoncap: vlun is null"));
1741 		return (rval);
1742 	}
1743 
1744 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
1745 		return (UNDEFINED);
1746 	}
1747 
1748 	/*
1749 	 * Process setcap request.
1750 	 */
1751 	if (doset) {
1752 		/*
1753 		 * At present, we can only set binary (0/1) values
1754 		 */
1755 		switch (cidx) {
1756 		case SCSI_CAP_ARQ:
1757 			if (val == 0) {
1758 				rval = 0;
1759 			} else {
1760 				rval = 1;
1761 			}
1762 			break;
1763 
1764 		case SCSI_CAP_LUN_RESET:
1765 			if (tgtonly == 0) {
1766 				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1767 				    "scsi_vhci_setcap: "
1768 				    "Returning error since whom = 0"));
1769 				rval = -1;
1770 				break;
1771 			}
1772 			/*
1773 			 * Set the capability accordingly.
1774 			 */
1775 			mutex_enter(&vlun->svl_mutex);
1776 			vlun->svl_support_lun_reset = val;
1777 			rval = val;
1778 			mutex_exit(&vlun->svl_mutex);
1779 			break;
1780 
1781 		case SCSI_CAP_SECTOR_SIZE:
1782 			mutex_enter(&vlun->svl_mutex);
1783 			vlun->svl_sector_size = val;
1784 			vlun->svl_setcap_done = 1;
1785 			mutex_exit(&vlun->svl_mutex);
1786 			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);
1787 
1788 			/* Always return success */
1789 			rval = 1;
1790 			break;
1791 
1792 		default:
1793 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1794 			    "!vhci_setcap: unsupported %d", cidx));
1795 			rval = UNDEFINED;
1796 			break;
1797 		}
1798 
1799 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1800 		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
1801 		    "0x%x/0x%x/0x%x/%d\n",
1802 		    cap, val, tgtonly, doset, rval));
1803 
1804 	} else {
1805 		/*
1806 		 * Process getcap request.
1807 		 */
1808 		switch (cidx) {
1809 		case SCSI_CAP_DMA_MAX:
1810 			/*
1811 			 * For X86 this capability is caught in scsi_ifgetcap().
1812 			 * XXX Should this be getting the value from the pHCI?
1813 			 */
1814 			rval = (int)VHCI_DMA_MAX_XFER_CAP;
1815 			break;
1816 
1817 		case SCSI_CAP_INITIATOR_ID:
1818 			rval = 0x00;
1819 			break;
1820 
1821 		case SCSI_CAP_ARQ:
1822 		case SCSI_CAP_RESET_NOTIFICATION:
1823 		case SCSI_CAP_TAGGED_QING:
1824 			rval = 1;
1825 			break;
1826 
1827 		case SCSI_CAP_SCSI_VERSION:
1828 			rval = 3;
1829 			break;
1830 
1831 		case SCSI_CAP_INTERCONNECT_TYPE:
1832 			rval = INTERCONNECT_FABRIC;
1833 			break;
1834 
1835 		case SCSI_CAP_LUN_RESET:
1836 			/*
1837 			 * scsi_vhci will always return success for LUN reset.
1838 			 * When a request for LUN reset comes through the
1839 			 * scsi_reset entry point, an attempt will be made
1840 			 * to issue the reset through all the possible
1841 			 * paths.
1842 			 */
1843 			mutex_enter(&vlun->svl_mutex);
1844 			rval = vlun->svl_support_lun_reset;
1845 			mutex_exit(&vlun->svl_mutex);
1846 			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
1847 			    "scsi_vhci_getcap:"
1848 			    "Getting the Lun reset capability %d", rval));
1849 			break;
1850 
1851 		case SCSI_CAP_SECTOR_SIZE:
1852 			mutex_enter(&vlun->svl_mutex);
1853 			rval = vlun->svl_sector_size;
1854 			mutex_exit(&vlun->svl_mutex);
1855 			break;
1856 
1857 		case SCSI_CAP_CDB_LEN:
1858 			rval = VHCI_SCSI_CDB_SIZE;
1859 			break;
1860 
1861 		case SCSI_CAP_DMA_MAX_ARCH:
1862 			/*
1863 			 * For X86 this capability is caught in scsi_ifgetcap().
1864 			 * XXX Should this be getting the value from the pHCI?
1865 			 */
1866 			rval = 0;
1867 			break;
1868 
1869 		default:
1870 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1871 			    "!vhci_getcap: unsupported %d", cidx));
1872 			rval = UNDEFINED;
1873 			break;
1874 		}
1875 
1876 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1877 		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
1878 		    "0x%x/0x%x/0x%x/%d\n",
1879 		    cap, val, tgtonly, doset, rval));
1880 	}
1881 	return (rval);
1882 }
1883 
1884 
1885 /*
1886  * Function name : vhci_scsi_getcap()
1887  *
1888  */
1889 static int
1890 vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
1891 {
1892 	return (vhci_commoncap(ap, cap, 0, whom, 0));
1893 }
1894 
1895 static int
1896 vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1897 {
1898 	return (vhci_commoncap(ap, cap, value, whom, 1));
1899 }
1900 
1901 /*
1902  * Function name : vhci_scsi_abort()
1903  */
1904 /* ARGSUSED */
1905 static int
1906 vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1907 {
1908 	return (0);
1909 }
1910 
1911 /*
1912  * Function name : vhci_scsi_init_pkt
1913  *
1914  * Return Values : pointer to scsi_pkt, or NULL
1915  */
1916 /* ARGSUSED */
1917 static struct scsi_pkt *
1918 vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1919     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1920     int flags, int (*callback)(caddr_t), caddr_t arg)
1921 {
1922 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
1923 	struct vhci_pkt		*vpkt;
1924 	int			rval;
1925 	int			newpkt = 0;
1926 	struct scsi_pkt		*pktp;
1927 
1928 
1929 	if (pkt == NULL) {
1930 		if (cmdlen > VHCI_SCSI_CDB_SIZE) {
1931 			if ((cmdlen != VHCI_SCSI_OSD_CDB_SIZE) ||
1932 			    ((flags & VHCI_SCSI_OSD_PKT_FLAGS) !=
1933 			    VHCI_SCSI_OSD_PKT_FLAGS)) {
1934 				VHCI_DEBUG(1, (CE_NOTE, NULL,
1935 				    "!init pkt: cdb size not supported\n"));
1936 				return (NULL);
1937 			}
1938 		}
1939 
1940 		pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
1941 		    ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
1942 		    arg);
1943 
1944 		if (pktp == NULL) {
1945 			return (NULL);
1946 		}
1947 
1948 		/* Get the vhci's private structure */
1949 		vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
1950 		ASSERT(vpkt);
1951 
1952 		/* Save the target driver's packet */
1953 		vpkt->vpkt_tgt_pkt = pktp;
1954 
1955 		/*
1956 		 * Save the tgt init_pkt arguments (flags, cdblen, scblen)
1957 		 * in case deferred binding or a later rebind requires them.
1958 		 */
1959 		vpkt->vpkt_tgt_init_pkt_flags = flags;
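		/*
		 * A NULL_FUNC callback means the caller cannot block;
		 * record that so the later path binding avoids sleeping.
		 */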
1960 		vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0;
1961 		vpkt->vpkt_state = VHCI_PKT_IDLE;
1962 		vpkt->vpkt_tgt_init_cdblen = cmdlen;
1963 		vpkt->vpkt_tgt_init_scblen = statuslen;
1964 		newpkt = 1;
1965 	} else { /* pkt not NULL */
1966 		vpkt = pkt->pkt_ha_private;
1967 	}
1968 
1969 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt "
1970 	    "vpkt %p flags %x\n", (void *)vpkt, flags));
1971 
1972 	/* Clear any stale error flags */
1973 	if (bp) {
1974 		bioerror(bp, 0);
1975 	}
1976 
1977 	vpkt->vpkt_tgt_init_bp = bp;
1978 
1979 	if (flags & PKT_DMA_PARTIAL) {
1980 
1981 		/*
1982 		 * Immediate binding is needed.
1983 		 * The target driver may not set this flag on the next
1984 		 * invocation, so vhci has to remember that it was set
1985 		 * during the first invocation of vhci_scsi_init_pkt.
1986 		 */
1987 		vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL;
1988 	}
1989 
1990 	if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) {
1991 
1992 		/*
1993 		 * Re-initialize some of the target driver packet state
1994 		 * information.
1995 		 */
1996 		vpkt->vpkt_tgt_pkt->pkt_state = 0;
1997 		vpkt->vpkt_tgt_pkt->pkt_statistics = 0;
1998 		vpkt->vpkt_tgt_pkt->pkt_reason = 0;
1999 
2000 		/*
2001 		 * Bind vpkt->vpkt_path for this I/O at init time.  If an
2002 		 * I/O error happens later, the target driver will clear
2003 		 * this vpkt->vpkt_path binding before re-issuing the I/O.
2004 		 */
2005 		VHCI_DEBUG(8, (CE_NOTE, NULL,
2006 		    "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
2007 		    (void *)vpkt, newpkt));
2008 		if (pkt && vpkt->vpkt_hba_pkt) {
2009 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2010 			    "v_s_i_p calling update_pHCI_pkt resid %ld\n",
2011 			    pkt->pkt_resid));
2012 			vhci_update_pHCI_pkt(vpkt, pkt);
2013 		}
2014 		if (callback == SLEEP_FUNC) {
2015 			rval = vhci_bind_transport(
2016 			    ap, vpkt, flags, callback);
2017 		} else {
2018 			rval = vhci_bind_transport(
2019 			    ap, vpkt, flags, NULL_FUNC);
2020 		}
2021 		VHCI_DEBUG(8, (CE_NOTE, NULL,
2022 		    "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
2023 		    (void *)vpkt, rval));
2024 		if (bp) {
2025 			if (rval == TRAN_FATAL_ERROR) {
2026 				/*
2027 				 * No paths available. Could not bind
2028 				 * any pHCI. Setting EFAULT as a way
2029 				 * to indicate no DMA is mapped.
2030 				 */
2031 				bioerror(bp, EFAULT);
2032 			} else {
2033 				/*
2034 				 * Do not indicate any pHCI errors to
2035 				 * target driver otherwise.
2036 				 */
2037 				bioerror(bp, 0);
2038 			}
2039 		}
2040 		if (rval != TRAN_ACCEPT) {
2041 			VHCI_DEBUG(8, (CE_NOTE, NULL,
2042 			    "vhci_scsi_init_pkt: "
2043 			    "v_b_t failed 0x%p newpkt %x\n",
2044 			    (void *)vpkt, newpkt));
2045 			if (newpkt) {
2046 				scsi_hba_pkt_free(ap,
2047 				    vpkt->vpkt_tgt_pkt);
2048 			}
2049 			return (NULL);
2050 		}
2051 		ASSERT(vpkt->vpkt_hba_pkt != NULL);
2052 		ASSERT(vpkt->vpkt_path != NULL);
2053 
2054 		/* Update the resid for the target driver */
2055 		vpkt->vpkt_tgt_pkt->pkt_resid =
2056 		    vpkt->vpkt_hba_pkt->pkt_resid;
2057 	}
2058 
2059 	return (vpkt->vpkt_tgt_pkt);
2060 }
2061 
2062 /*
2063  * Function name : vhci_scsi_destroy_pkt
2064  *
2065  * Return Values : none
2066  */
2067 static void
2068 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2069 {
2070 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2071 
2072 	VHCI_DEBUG(8, (CE_NOTE, NULL,
2073 	    "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));
2074 
2075 	vpkt->vpkt_tgt_init_pkt_flags = 0;
2076 	if (vpkt->vpkt_hba_pkt) {
2077 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2078 		vpkt->vpkt_hba_pkt = NULL;
2079 	}
2080 	if (vpkt->vpkt_path) {
2081 		mdi_rele_path(vpkt->vpkt_path);
2082 		vpkt->vpkt_path = NULL;
2083 	}
2084 
2085 	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
2086 	scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
2087 }
2088 
2089 /*
2090  * Function name : vhci_scsi_dmafree()
2091  *
2092  * Return Values : none
2093  */
2094 /*ARGSUSED*/
2095 static void
2096 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2097 {
2098 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2099 
2100 	VHCI_DEBUG(6, (CE_NOTE, NULL,
2101 	    "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));
2102 
2103 	ASSERT(vpkt != NULL);
2104 	if (vpkt->vpkt_hba_pkt) {
2105 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2106 		vpkt->vpkt_hba_pkt = NULL;
2107 	}
2108 	if (vpkt->vpkt_path) {
2109 		mdi_rele_path(vpkt->vpkt_path);
2110 		vpkt->vpkt_path = NULL;
2111 	}
2112 }
2113 
2114 /*
2115  * Function name : vhci_scsi_sync_pkt()
2116  *
2117  * Return Values : none
2118  */
2119 /*ARGSUSED*/
2120 static void
2121 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2122 {
2123 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2124 
2125 	ASSERT(vpkt != NULL);
2126 	if (vpkt->vpkt_hba_pkt) {
2127 		scsi_sync_pkt(vpkt->vpkt_hba_pkt);
2128 	}
2129 }
2130 
2131 /*
2132  * routine for reset notification setup, to register or cancel.
2133  */
2134 static int
2135 vhci_scsi_reset_notify(struct scsi_address *ap, int flag,
2136     void (*callback)(caddr_t), caddr_t arg)
2137 {
2138 	struct scsi_vhci *vhci = ADDR2VHCI(ap);
2139 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
2140 	    &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf));
2141 }
2142 
2143 static int
2144 vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
2145     char *name, int len, int bus_addr)
2146 {
2147 	dev_info_t		*cdip;
2148 	char			*guid;
2149 	scsi_vhci_lun_t		*vlun;
2150 
2151 	ASSERT(sd != NULL);
2152 	ASSERT(name != NULL);
2153 
2154 	*name = 0;
2155 	cdip = sd->sd_dev;
2156 
2157 	ASSERT(cdip != NULL);
2158 
2159 	if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS)
2160 		return (1);
2161 
2162 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
2163 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS)
2164 		return (1);
2165 
2166 	/*
2167 	 * Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>".
2168 	 *	<guid>		bus_addr argument == 0
2169 	 *	<bus_addr>	bus_addr argument != 0
2170 	 * Since the <guid> is already provided with the unit-address, we
2171 	 * report the failover module name in <bus_addr> to keep output short.
2172 	 */
2173 	vlun = ADDR2VLUN(&sd->sd_address);
2174 	if (bus_addr == 0) {
2175 		/* report the guid:  */
2176 		(void) snprintf(name, len, "g%s", guid);
2177 	} else if (vlun && vlun->svl_fops_name) {
2178 		/* report the name of the failover module */
2179 		(void) snprintf(name, len, "%s", vlun->svl_fops_name);
2180 	}
2181 
2182 	ddi_prop_free(guid);
2183 	return (1);
2184 }
2185 
2186 static int
2187 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
2188 {
2189 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 1));
2190 }
2191 
2192 static int
2193 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len)
2194 {
2195 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 0));
2196 }
2197 
2198 /*
2199  * Return a pointer to the guid part of the devnm.
2200  * devnm format is "nodename@busaddr", busaddr format is "gGUID".
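 * For example, a devnm of "disk@g500000e0119d14e1" (hypothetical GUID)
 * yields a pointer to "500000e0119d14e1".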
2201  */
2202 static char *
2203 vhci_devnm_to_guid(char *devnm)
2204 {
2205 	char *cp = devnm;
2206 
2207 	if (devnm == NULL)
2208 		return (NULL);
2209 
2210 	while (*cp != '\0' && *cp != '@')
2211 		cp++;
2212 	if (*cp == '@' && *(cp + 1) == 'g')
2213 		return (cp + 2);
2214 	return (NULL);
2215 }
2216 
2217 static int
2218 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags,
2219     int (*func)(caddr_t))
2220 {
2221 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
2222 	dev_info_t		*cdip = ADDR2DIP(ap);
2223 	mdi_pathinfo_t		*pip = NULL;
2224 	mdi_pathinfo_t		*npip = NULL;
2225 	scsi_vhci_priv_t	*svp = NULL;
2226 	struct scsi_device	*psd = NULL;
2227 	struct scsi_address	*address = NULL;
2228 	struct scsi_pkt		*pkt = NULL;
2229 	int			rval = -1;
2230 	int			pgr_sema_held = 0;
2231 	int			held;
2232 	int			mps_flag = MDI_SELECT_ONLINE_PATH;
2233 	struct scsi_vhci_lun	*vlun;
2234 	int			path_instance = 0;
2235 
2236 	vlun = ADDR2VLUN(ap);
2237 	ASSERT(vlun != NULL);
2238 
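	/*
	 * PROUT REGISTER and REGISTER AND IGNORE EXISTING KEY must be
	 * replicated on every path, so PGR sequences are serialized on
	 * svl_pgr_sema; a failed sema_tryp() means another PGR command
	 * is already in flight, hence TRAN_BUSY.
	 */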
2239 	if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) &&
2240 	    (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2241 	    VHCI_PROUT_REGISTER) ||
2242 	    ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2243 	    VHCI_PROUT_R_AND_IGNORE))) {
2244 		if (!sema_tryp(&vlun->svl_pgr_sema))
2245 			return (TRAN_BUSY);
2246 		pgr_sema_held = 1;
2247 		if (vlun->svl_first_path != NULL) {
2248 			rval = mdi_select_path(cdip, NULL,
2249 			    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2250 			    NULL, &pip);
2251 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2252 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2253 				    "vhci_bind_transport: path select fail\n"));
2254 			} else {
2255 				npip = pip;
2256 				do {
2257 					if (npip == vlun->svl_first_path) {
2258 						VHCI_DEBUG(4, (CE_NOTE, NULL,
2259 						    "vhci_bind_transport: "
2260 						    "valid first path 0x%p\n",
2261 						    (void *)
2262 						    vlun->svl_first_path));
2263 						pip = vlun->svl_first_path;
2264 						goto bind_path;
2265 					}
2266 					pip = npip;
2267 					rval = mdi_select_path(cdip, NULL,
2268 					    MDI_SELECT_ONLINE_PATH |
2269 					    MDI_SELECT_STANDBY_PATH,
2270 					    pip, &npip);
2271 					mdi_rele_path(pip);
2272 				} while ((rval == MDI_SUCCESS) &&
2273 				    (npip != NULL));
2274 			}
2275 		}
2276 
2277 		if (vlun->svl_first_path) {
2278 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2279 			    "vhci_bind_transport: invalid first path 0x%p\n",
2280 			    (void *)vlun->svl_first_path));
2281 			vlun->svl_first_path = NULL;
2282 		}
2283 	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
2284 		if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
2285 			if (!sema_tryp(&vlun->svl_pgr_sema))
2286 				return (TRAN_BUSY);
2287 		}
2288 		pgr_sema_held = 1;
2289 	}
2290 
2291 	/*
2292 	 * If the path is already bound for the PKT_PARTIAL_DMA case,
2293 	 * try to use the same path.
2294 	 */
2295 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2296 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2297 		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2298 		    "vpkt 0x%p, path 0x%p\n",
2299 		    (void *)vpkt, (void *)vpkt->vpkt_path));
2300 		pip = vpkt->vpkt_path;
2301 		goto bind_path;
2302 	}
2303 
2304 	/*
2305 	 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set
2306 	 * indicates that mdi_select_path should be called to select a
2307 	 * specific instance.
2308 	 *
2309 	 * NB: Condition pkt_path_instance reference on proper allocation.
2310 	 */
2311 	if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) &&
2312 	    scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) {
2313 		path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
2314 	}
2315 
2316 	/*
2317 	 * If a reservation is active, bind the transport directly to
2318 	 * the pip holding the reservation.
2319 	 */
2320 	if (vpkt->vpkt_hba_pkt == NULL) {
2321 		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2322 			if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2323 				pip = vlun->svl_resrv_pip;
2324 				mdi_hold_path(pip);
2325 				vlun->svl_waiting_for_activepath = 0;
2326 				rval = MDI_SUCCESS;
2327 				goto bind_path;
2328 			} else {
2329 				if (pgr_sema_held) {
2330 					sema_v(&vlun->svl_pgr_sema);
2331 				}
2332 				return (TRAN_BUSY);
2333 			}
2334 		}
2335 try_again:
2336 		rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2337 		    path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
2338 		    (void *)(intptr_t)path_instance, &pip);
2339 		if (rval == MDI_BUSY) {
2340 			if (pgr_sema_held) {
2341 				sema_v(&vlun->svl_pgr_sema);
2342 			}
2343 			return (TRAN_BUSY);
2344 		} else if (rval == MDI_DEVI_ONLINING) {
2345 			/*
2346 			 * If we are here then we are in the midst of
2347 			 * an attach/probe of the client device.
2348 			 * We attempt to bind to an ONLINE path if available;
2349 			 * else it is OK to bind to a STANDBY path (instead
2350 			 * of triggering a failover) because I/O associated
2351 			 * with attach/probe (e.g. INQUIRY, block 0 read)
2352 			 * is completed by targets even on passive paths.
2353 			 * If no ONLINE path is available, it is important
2354 			 * to set svl_waiting_for_activepath for two
2355 			 * reasons: (1) to avoid sense analysis in the
2356 			 * "external failure detection" codepath in
2357 			 * vhci_intr(); failure to do so will result in an
2358 			 * infinite loop (unless an ONLINE path becomes
2359 			 * available at some point), and (2) to avoid
2360 			 * unnecessary failover (see the "---Waiting For
2361 			 * Active Path---" comment below).
2362 			 */
2363 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2364 			    "state\n", (void *)cdip));
2365 			pip = NULL;
2366 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2367 			    mps_flag, NULL, &pip);
2368 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2369 				if (vlun->svl_waiting_for_activepath == 0) {
2370 					vlun->svl_waiting_for_activepath = 1;
2371 					vlun->svl_wfa_time = gethrtime();
2372 				}
2373 				mps_flag |= MDI_SELECT_STANDBY_PATH;
2374 				rval = mdi_select_path(cdip,
2375 				    vpkt->vpkt_tgt_init_bp,
2376 				    mps_flag, NULL, &pip);
2377 				if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2378 					if (pgr_sema_held) {
2379 						sema_v(&vlun->svl_pgr_sema);
2380 					}
2381 					return (TRAN_FATAL_ERROR);
2382 				}
2383 				goto bind_path;
2384 			}
2385 		} else if ((rval == MDI_FAILURE) ||
2386 		    ((rval == MDI_NOPATH) && (path_instance))) {
2387 			if (pgr_sema_held) {
2388 				sema_v(&vlun->svl_pgr_sema);
2389 			}
2390 			return (TRAN_FATAL_ERROR);
2391 		}
2392 
2393 		if ((pip == NULL) || (rval == MDI_NOPATH)) {
2394 			while (vlun->svl_waiting_for_activepath) {
2395 				/*
2396 				 * ---Waiting For Active Path---
2397 				 * This device was discovered across a
2398 				 * passive path; let's wait for a little
2399 				 * bit; hopefully an active path will
2400 				 * show up, obviating the need for a
2401 				 * failover.
2402 				 */
2403 				if ((gethrtime() - vlun->svl_wfa_time) >=
2404 				    (60 * NANOSEC)) {
2405 					vlun->svl_waiting_for_activepath = 0;
2406 				} else {
2407 					drv_usecwait(1000);
2408 					if (vlun->svl_waiting_for_activepath
2409 					    == 0) {
2410 						/*
2411 						 * an active path has come
2412 						 * online!
2413 						 */
2414 						goto try_again;
2415 					}
2416 				}
2417 			}
2418 			VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2419 			if (!held) {
2420 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2421 				    "!Lun not held\n"));
2422 				if (pgr_sema_held) {
2423 					sema_v(&vlun->svl_pgr_sema);
2424 				}
2425 				return (TRAN_BUSY);
2426 			}
2427 			/*
2428 			 * Now that the LUN is stable, do one last check
2429 			 * to make sure no other changes sneaked in
2430 			 * (like a path coming online or a
2431 			 * failover initiated by another thread).
2432 			 */
2433 			pip = NULL;
2434 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2435 			    0, NULL, &pip);
2436 			if (pip != NULL) {
2437 				VHCI_RELEASE_LUN(vlun);
2438 				vlun->svl_waiting_for_activepath = 0;
2439 				goto bind_path;
2440 			}
2441 
2442 			/*
2443 			 * Check if there is an ONLINE path OR a STANDBY path
2444 			 * available. If none is available, do not attempt
2445 			 * to do a failover, just return a fatal error at this
2446 			 * point.
2447 			 */
2448 			npip = NULL;
2449 			rval = mdi_select_path(cdip, NULL,
2450 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2451 			    NULL, &npip);
2452 			if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2453 				/*
2454 				 * No paths available, just return a FATAL error.
2455 				 */
2456 				VHCI_RELEASE_LUN(vlun);
2457 				if (pgr_sema_held) {
2458 					sema_v(&vlun->svl_pgr_sema);
2459 				}
2460 				return (TRAN_FATAL_ERROR);
2461 			}
2462 			mdi_rele_path(npip);
2463 			if (!(vpkt->vpkt_state & VHCI_PKT_IN_FAILOVER)) {
2464 				VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
2465 				    "mdi_failover\n"));
2466 				rval = mdi_failover(vhci->vhci_dip, cdip,
2467 				    MDI_FAILOVER_ASYNC);
2468 			} else {
2469 				rval = vlun->svl_failover_status;
2470 			}
2471 			if (rval == MDI_FAILURE) {
2472 				VHCI_RELEASE_LUN(vlun);
2473 				if (pgr_sema_held) {
2474 					sema_v(&vlun->svl_pgr_sema);
2475 				}
2476 				return (TRAN_FATAL_ERROR);
2477 			} else if (rval == MDI_BUSY) {
2478 				VHCI_RELEASE_LUN(vlun);
2479 				if (pgr_sema_held) {
2480 					sema_v(&vlun->svl_pgr_sema);
2481 				}
2482 				return (TRAN_BUSY);
2483 			} else {
2484 				if (pgr_sema_held) {
2485 					sema_v(&vlun->svl_pgr_sema);
2486 				}
2487 				vpkt->vpkt_state |= VHCI_PKT_IN_FAILOVER;
2488 				return (TRAN_BUSY);
2489 			}
2490 		}
2491 		vlun->svl_waiting_for_activepath = 0;
2492 bind_path:
2493 		vpkt->vpkt_path = pip;
2494 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2495 		ASSERT(svp != NULL);
2496 
2497 		psd = svp->svp_psd;
2498 		ASSERT(psd != NULL);
2499 		address = &psd->sd_address;
2500 	} else {
2501 		pkt = vpkt->vpkt_hba_pkt;
2502 		address = &pkt->pkt_address;
2503 	}
2504 
2505 	/* Verify match of specified path_instance and selected path_instance */
2506 	ASSERT((path_instance == 0) ||
2507 	    (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));
2508 
2509 	/*
2510 	 * For the PKT_PARTIAL_DMA case, call the pHCI's scsi_init_pkt
2511 	 * whenever the target driver calls vhci_scsi_init_pkt.
2512 	 */
2513 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
2514 	    vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
2515 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2516 		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2517 		    "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
2518 		    (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
2519 		pkt = vpkt->vpkt_hba_pkt;
2520 		address = &pkt->pkt_address;
2521 	}
2522 
2523 	if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
2524 		pkt = scsi_init_pkt(address, pkt,
2525 		    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
2526 		    vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL);
2527 
2528 		if (pkt == NULL) {
2529 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2530 			    "!bind transport: 0x%p 0x%p 0x%p\n",
2531 			    (void *)vhci, (void *)psd, (void *)vpkt));
2532 			if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
2533 				MDI_PI_ERRSTAT(vpkt->vpkt_path,
2534 				    MDI_PI_TRANSERR);
2535 				mdi_rele_path(vpkt->vpkt_path);
2536 				vpkt->vpkt_path = NULL;
2537 			}
2538 			if (pgr_sema_held) {
2539 				sema_v(&vlun->svl_pgr_sema);
2540 			}
2541 			/*
2542 			 * Consider it a fatal error if b_error is
2543 			 * set as a result of a DMA binding failure,
2544 			 * rather than a condition of being temporarily
2545 			 * out of some resource.
2546 			 */
2547 			if (vpkt->vpkt_tgt_init_bp == NULL ||
2548 			    geterror(vpkt->vpkt_tgt_init_bp))
2549 				return (TRAN_FATAL_ERROR);
2550 			else
2551 				return (TRAN_BUSY);
2552 		}
2553 	}
2554 
2555 	pkt->pkt_private = vpkt;
2556 	vpkt->vpkt_hba_pkt = pkt;
2557 	return (TRAN_ACCEPT);
2558 }
2559 
2560 
2561 /*PRINTFLIKE3*/
2562 void
2563 vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
2564 {
2565 	char		buf[256];
2566 	va_list		ap;
2567 
2568 	va_start(ap, fmt);
2569 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
2570 	va_end(ap);
2571 
2572 	scsi_log(dip, "scsi_vhci", level, buf);
2573 }
2574 
2575 /* do a PGR out with the information we've saved away */
2576 static int
2577 vhci_do_prout(scsi_vhci_priv_t *svp)
2578 {
2579 
2580 	struct scsi_pkt			*new_pkt;
2581 	struct buf			*bp;
2582 	scsi_vhci_lun_t			*vlun = svp->svp_svl;
2583 	int				rval, retry, nr_retry, ua_retry;
2584 	uint8_t				*sns, skey;
2585 
2586 	bp = getrbuf(KM_SLEEP);
2587 	bp->b_flags = B_WRITE;
2588 	bp->b_resid = 0;
2589 	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2590 	bp->b_bcount = vlun->svl_bcount;
2591 
2592 	VHCI_INCR_PATH_CMDCOUNT(svp);
2593 
2594 	new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
2595 	    CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
2596 	    SLEEP_FUNC, NULL);
2597 	if (new_pkt == NULL) {
2598 		VHCI_DECR_PATH_CMDCOUNT(svp);
2599 		freerbuf(bp);
2600 		cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
2601 		return (0);
2602 	}
2603 	mutex_enter(&vlun->svl_mutex);
2604 	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2605 	bp->b_bcount = vlun->svl_bcount;
2606 	bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp,
2607 	    sizeof (vlun->svl_cdb));
2608 	new_pkt->pkt_time = vlun->svl_time;
2609 	mutex_exit(&vlun->svl_mutex);
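	/*
	 * FLAG_NOINTR runs the command in polled mode, so the
	 * vhci_do_scsi_cmd() call below completes it synchronously.
	 */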
2610 	new_pkt->pkt_flags = FLAG_NOINTR;
2611 
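	/*
	 * Separate retry budgets: nr_retry counts NOT READY retries
	 * (bounded by vhci_prout_not_ready_retry) and ua_retry counts
	 * unit-attention / state-change retries (bounded by
	 * VHCI_MAX_PGR_RETRIES); retry holds the counter in use.
	 */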
2612 	ua_retry = nr_retry = retry = 0;
2613 again:
2614 	rval = vhci_do_scsi_cmd(new_pkt);
2615 	if (rval != 1) {
2616 		if ((new_pkt->pkt_reason == CMD_CMPLT) &&
2617 		    (SCBP_C(new_pkt) == STATUS_CHECK) &&
2618 		    (new_pkt->pkt_state & STATE_ARQ_DONE)) {
2619 			sns = (uint8_t *)
2620 			    &(((struct scsi_arq_status *)(uintptr_t)
2621 			    (new_pkt->pkt_scbp))->sts_sensedata);
2622 			skey = scsi_sense_key(sns);
2623 			if ((skey == KEY_UNIT_ATTENTION) ||
2624 			    (skey == KEY_NOT_READY)) {
2625 				int max_retry;
2626 				struct scsi_failover_ops *fops;
2627 				fops = vlun->svl_fops;
2628 				rval = fops->sfo_analyze_sense(svp->svp_psd,
2629 				    sns, vlun->svl_fops_ctpriv);
2630 				if (rval == SCSI_SENSE_NOT_READY) {
2631 					max_retry = vhci_prout_not_ready_retry;
2632 					retry = nr_retry++;
2633 					delay(1 * drv_usectohz(1000000));
2634 				} else {
2635 					/* chk for state change and update */
2636 					if (rval == SCSI_SENSE_STATE_CHANGED) {
2637 						int held;
2638 						VHCI_HOLD_LUN(vlun,
2639 						    VH_NOSLEEP, held);
2640 						if (!held) {
2641 							rval = TRAN_BUSY;
2642 						} else {
2643 							/* chk for alua first */
2644 							vhci_update_pathstates(
2645 							    (void *)vlun);
2646 						}
2647 					}
2648 					retry = ua_retry++;
2649 					max_retry = VHCI_MAX_PGR_RETRIES;
2650 				}
2651 				if (retry < max_retry) {
2652 					VHCI_DEBUG(4, (CE_WARN, NULL,
2653 					    "!vhci_do_prout retry 0x%x "
2654 					    "(0x%x 0x%x 0x%x)",
2655 					    SCBP_C(new_pkt),
2656 					    new_pkt->pkt_cdbp[0],
2657 					    new_pkt->pkt_cdbp[1],
2658 					    new_pkt->pkt_cdbp[2]));
2659 					goto again;
2660 				}
2661 				rval = 0;
2662 				VHCI_DEBUG(4, (CE_WARN, NULL,
2663 				    "!vhci_do_prout 0x%x "
2664 				    "(0x%x 0x%x 0x%x)",
2665 				    SCBP_C(new_pkt),
2666 				    new_pkt->pkt_cdbp[0],
2667 				    new_pkt->pkt_cdbp[1],
2668 				    new_pkt->pkt_cdbp[2]));
2669 			} else if (skey == KEY_ILLEGAL_REQUEST)
2670 				rval = VHCI_PGR_ILLEGALOP;
2671 		}
2672 	} else {
2673 		rval = 1;
2674 	}
2675 	scsi_destroy_pkt(new_pkt);
2676 	VHCI_DECR_PATH_CMDCOUNT(svp);
2677 	freerbuf(bp);
2678 	return (rval);
2679 }
2680 
2681 static void
2682 vhci_run_cmd(void *arg)
2683 {
2684 	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
2685 	struct scsi_pkt		*tpkt;
2686 	scsi_vhci_priv_t	*svp;
2687 	mdi_pathinfo_t		*pip, *npip;
2688 	scsi_vhci_lun_t		*vlun;
2689 	dev_info_t		*cdip;
2690 	scsi_vhci_priv_t	*nsvp;
2691 	int			fail = 0;
2692 	int			rval;
2693 	struct vhci_pkt		*vpkt;
2694 	uchar_t			cdb_1;
2695 	vhci_prout_t		*prout;
2696 
2697 	vpkt = (struct vhci_pkt *)pkt->pkt_private;
2698 	tpkt = vpkt->vpkt_tgt_pkt;
2699 	pip = vpkt->vpkt_path;
2700 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2701 	if (svp == NULL) {
2702 		tpkt->pkt_reason = CMD_TRAN_ERR;
2703 		tpkt->pkt_statistics = STAT_ABORTED;
2704 		goto done;
2705 	}
2706 	vlun = svp->svp_svl;
2707 	prout = &vlun->svl_prout;
2708 	if (SCBP_C(pkt) != STATUS_GOOD)
2709 		fail++;
2710 	cdip = vlun->svl_dip;
2711 	pip = npip = NULL;
2712 	rval = mdi_select_path(cdip, NULL,
2713 	    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH, NULL, &npip);
2714 	if ((rval != MDI_SUCCESS) || (npip == NULL)) {
2715 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2716 		    "vhci_run_cmd: no path! 0x%p\n", (void *)svp));
2717 		tpkt->pkt_reason = CMD_TRAN_ERR;
2718 		tpkt->pkt_statistics = STAT_ABORTED;
2719 		goto done;
2720 	}
2721 
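	/*
	 * Temporarily rewrite the PROUT service action (low 5 bits of
	 * CDB byte 1) to REGISTER AND IGNORE EXISTING KEY so the
	 * registration can be replicated on each remaining path; the
	 * original byte is restored after the loop.
	 */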
2722 	cdb_1 = vlun->svl_cdb[1];
2723 	vlun->svl_cdb[1] &= 0xe0;
2724 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
2725 
2726 	do {
2727 		nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
2728 		if (nsvp == NULL) {
2729 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2730 			    "vhci_run_cmd: no "
2731 			    "client priv! 0x%p offlined?\n",
2732 			    (void *)npip));
2733 			goto next_path;
2734 		}
2735 		if (vlun->svl_first_path == npip) {
2736 			goto next_path;
2737 		} else {
2738 			if (vhci_do_prout(nsvp) != 1)
2739 				fail++;
2740 		}
2741 next_path:
2742 		pip = npip;
2743 		rval = mdi_select_path(cdip, NULL,
2744 		    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2745 		    pip, &npip);
2746 		mdi_rele_path(pip);
2747 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
2748 
2749 	vlun->svl_cdb[1] = cdb_1;
2750 
2751 	if (fail) {
2752 		VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, "
2753 		    "couldn't be replicated on all paths",
2754 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
2755 		vhci_print_prout_keys(vlun, "vhci_run_cmd: ");
2756 
2757 		if (SCBP_C(pkt) != STATUS_GOOD) {
2758 			tpkt->pkt_reason = CMD_TRAN_ERR;
2759 			tpkt->pkt_statistics = STAT_ABORTED;
2760 		}
2761 	} else {
2762 		vlun->svl_pgr_active = 1;
2763 		vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:");
2764 
2765 		bcopy((const void *)prout->service_key,
2766 		    (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE);
2767 		bcopy((const void *)prout->res_key,
2768 		    (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE);
2769 
2770 		vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:");
2771 	}
2772 done:
2773 	if (SCBP_C(pkt) == STATUS_GOOD)
2774 		vlun->svl_first_path = NULL;
2775 
2776 	if (svp)
2777 		VHCI_DECR_PATH_CMDCOUNT(svp);
2778 
2779 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
2780 		scsi_destroy_pkt(pkt);
2781 		vpkt->vpkt_hba_pkt = NULL;
2782 		if (vpkt->vpkt_path) {
2783 			mdi_rele_path(vpkt->vpkt_path);
2784 			vpkt->vpkt_path = NULL;
2785 		}
2786 	}
2787 
2788 	sema_v(&vlun->svl_pgr_sema);
2789 	/*
2790 	 * The PROUT commands are not included in the automatic retry
2791 	 * mechanism, therefore, vpkt_org_vpkt should never be set here.
2792 	 */
2793 	ASSERT(vpkt->vpkt_org_vpkt == NULL);
2794 	scsi_hba_pkt_comp(tpkt);
2795 }
2796 
2797 /*
2798  * Get the keys registered with this target.  Since we will have
2799  * registered the same key with multiple initiators, strip out
2800  * any duplicate keys.
2801  *
2802  * The pointers which will be used to filter the registered keys from
2803  * the device will be stored in filter_prin and filter_pkt.  If the
2804  * allocation length of the buffer was sufficient for the number of
2805  * parameter data bytes available to be returned by the device then the
2806  * key filtering will use the keylist returned from the original
2807  * request.  If the allocation length of the buffer was not sufficient,
2808  * then the filtering will use the keylist returned from the request
2809  * that is resent below.
2810  *
2811  * If the device returns an additional length field that is greater than
2812  * the allocation length of the buffer, then allocate a new buffer which
2813  * can accommodate the number of parameter data bytes available to be
2814  * returned.  Resend the scsi PRIN command, filter out the duplicate
2815  * keys and return as many of the unique keys found as were originally
2816  * requested and set the additional length field equal to the data bytes
2817  * of unique reservation keys available to be returned.
2818  *
2819  * If the device returns an additional length field that is less than or
2820  * equal to the allocation length of the buffer, then all the available
2821  * keys registered were returned by the device.  Filter out the
2822  * duplicate keys and return all of the unique keys found and set the
2823  * additional length field equal to the data bytes of the reservation
2824  * keys to be returned.
2825  */
2826 
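/*
 * Size of the PRIN READ KEYS parameter data header: the 4-byte
 * GENERATION field plus the 4-byte ADDITIONAL LENGTH field.
 */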
2827 #define	VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation))
2828 
2829 static int
2830 vhci_do_prin(struct vhci_pkt **intr_vpkt)
2831 {
2832 	scsi_vhci_priv_t *svp;
2833 	struct vhci_pkt *vpkt = *intr_vpkt;
2834 	vhci_prin_readkeys_t *prin;
2835 	scsi_vhci_lun_t *vlun;
2836 	struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address);
2837 
2838 	struct buf		*new_bp = NULL;
2839 	struct scsi_pkt		*new_pkt = NULL;
2840 	struct vhci_pkt		*new_vpkt = NULL;
2841 	uint32_t		needed_length;
2842 	int			rval = VHCI_CMD_CMPLT;
2843 	uint32_t		prin_length = 0;
2844 	uint32_t		svl_prin_length = 0;
2845 
2846 	ASSERT(vpkt->vpkt_path);
2847 	svp = mdi_pi_get_vhci_private(vpkt->vpkt_path);
2848 	ASSERT(svp);
2849 	vlun = svp->svp_svl;
2850 	ASSERT(vlun);
2851 
2852 	/*
2853 	 * If the caller only asked for an amount of data that would not
2854 	 * be enough to include any key data, it is likely that they will
2855 	 * send the next command with a buffer size based on the information
2856 	 * from this header.  Doing recovery on this would be a duplication
2857 	 * of effort.
2858 	 */
2859 	if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) {
2860 		rval = VHCI_CMD_CMPLT;
2861 		goto exit;
2862 	}
2863 
2864 	if (vpkt->vpkt_org_vpkt == NULL) {
2865 		/*
2866 		 * Can fail as sleep is not allowed.
2867 		 */
2868 		prin = (vhci_prin_readkeys_t *)
2869 		    bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
2870 	} else {
2871 		/*
2872 		 * The retry buf doesn't need to be mapped in.
2873 		 */
2874 		prin = (vhci_prin_readkeys_t *)
2875 		    vpkt->vpkt_tgt_init_bp->b_un.b_daddr;
2876 	}
2877 
2878 	if (prin == NULL) {
2879 		VHCI_DEBUG(5, (CE_WARN, NULL,
2880 		    "vhci_do_prin: bp_mapin_common failed."));
2881 		rval = VHCI_CMD_ERROR;
2882 		goto fail;
2883 	}
2884 
2885 	prin_length = BE_32(prin->length);
2886 
2887 	/*
2888 	 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
2889 	 * information to be transferred exceeds the maximum value
2890 	 * that the ALLOCATION LENGTH field is capable of specifying,
2891 	 * the device server shall...terminate the command with CHECK
2892 	 * CONDITION status".  The ALLOCATION LENGTH field of the
2893 	 * PERSISTENT RESERVE IN command is 2 bytes. We should never
2894 	 * get here with an ADDITIONAL LENGTH greater than 0xFFFF
2895 	 * so if we do, then it is an error!
2896 	 */
2897 
2898 
2899 	if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) {
2900 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2901 		    "vhci_do_prin: Device returned invalid "
2902 		    "length 0x%x\n", prin_length));
2903 		rval = VHCI_CMD_ERROR;
2904 		goto fail;
2905 	}
2906 	needed_length = prin_length + VHCI_PRIN_HEADER_SZ;
2907 
2908 	/*
2909 	 * If prin->length is greater than the byte count allocated in the
2910 	 * original buffer, then resend the request with enough buffer
2911 	 * allocated to get all of the available registered keys.
2912 	 */
2913 	if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) &&
2914 	    (vpkt->vpkt_org_vpkt == NULL)) {
2915 
2916 		new_pkt = vhci_create_retry_pkt(vpkt);
2917 		if (new_pkt == NULL) {
2918 			rval = VHCI_CMD_ERROR;
2919 			goto fail;
2920 		}
2921 		new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2922 
2923 		/*
2924 		 * This is the buf whose buffer the prin
2925 		 * readkeys data will be returned into by
2926 		 * the device.
2927 		 */
2928 		new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
2929 		    NULL, needed_length, B_READ, NULL_FUNC, NULL);
2930 		if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) {
2931 			if (new_bp) {
2932 				scsi_free_consistent_buf(new_bp);
2933 			}
2934 			vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2935 			rval = VHCI_CMD_ERROR;
2936 			goto fail;
2937 		}
2938 		new_bp->b_bcount = needed_length;
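		/*
		 * Bytes 7-8 of the 10-byte PRIN CDB hold the big-endian
		 * ALLOCATION LENGTH; raise it so the device can return
		 * the complete key list.
		 */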
2939 		new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8);
2940 		new_pkt->pkt_cdbp[8] = (uchar_t)needed_length;
2941 
2942 		rval = VHCI_CMD_RETRY;
2943 
2944 		new_vpkt->vpkt_tgt_init_bp = new_bp;
2945 	}
2946 
2947 	if (rval == VHCI_CMD_RETRY) {
2948 
2949 		/*
2950 		 * There were more keys than the original request asked for.
2951 		 */
2952 		mdi_pathinfo_t *path_holder = vpkt->vpkt_path;
2953 
2954 		/*
2955 		 * Release the old path because it does not matter which path
2956 		 * this command is sent down.  This allows the normal bind
2957 		 * transport mechanism to be used.
2958 		 */
2959 		if (vpkt->vpkt_path != NULL) {
2960 			mdi_rele_path(vpkt->vpkt_path);
2961 			vpkt->vpkt_path = NULL;
2962 		}
2963 
2964 		/*
2965 		 * Dispatch the retry command
2966 		 */
2967 		if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2968 		    (void *) new_vpkt, KM_NOSLEEP) == TASKQID_INVALID) {
2969 			if (path_holder) {
2970 				vpkt->vpkt_path = path_holder;
2971 				mdi_hold_path(path_holder);
2972 			}
2973 			scsi_free_consistent_buf(new_bp);
2974 			vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2975 			rval = VHCI_CMD_ERROR;
2976 			goto fail;
2977 		}
2978 
2979 		/*
2980 		 * If we return VHCI_CMD_RETRY, that means the caller
2981 		 * is going to bail and wait for the reissued command
2982 		 * to complete.  In that case, we need to decrement
2983 		 * the path command count right now.  In any other
2984 		 * case, it'll be decremented by the caller.
2985 		 */
2986 		VHCI_DECR_PATH_CMDCOUNT(svp);
2987 		goto exit;
2988 
2989 	}
2990 
2991 	if (rval == VHCI_CMD_CMPLT) {
2992 		/*
2993 		 * The original request got all of the keys, or the recovery
2994 		 * packet has returned.
2995 		 */
2996 		int new;
2997 		int old;
2998 		int num_keys = prin_length / MHIOC_RESV_KEY_SIZE;
2999 
3000 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
3001 		    num_keys));
3002 
3003 #ifdef DEBUG
3004 		VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
3005 		if (vhci_debug == 5)
3006 			vhci_print_prin_keys(prin, num_keys);
3007 		VHCI_DEBUG(5, (CE_NOTE, NULL,
3008 		    "vhci_do_prin: MPxIO old keys:\n"));
3009 		if (vhci_debug == 5)
3010 			vhci_print_prin_keys(&vlun->svl_prin, num_keys);
3011 #endif
3012 
3013 		/*
3014 		 * Filter out all duplicate keys returned from the device
3015 		 * We know that we use a different key for every host, so we
3016 		 * can simply strip out duplicates. Otherwise we would need to
3017 		 * do more bookkeeping to figure out which keys to strip out.
3018 		 */
3019 
3020 		new = 0;
3021 
3022 		/*
3023 		 * If we got at least one key, copy it.
3024 		 */
3025 		if (num_keys > 0) {
3026 			vlun->svl_prin.keylist[0] = prin->keylist[0];
3027 			new++;
3028 		}
3029 
3030 		/*
3031 		 * Find the next unique key.
3032 		 */
3033 		for (old = 1; old < num_keys; old++) {
3034 			int j;
3035 			int match = 0;
3036 
3037 			if (new >= VHCI_NUM_RESV_KEYS)
3038 				break;
3039 			for (j = 0; j < new; j++) {
3040 				if (bcmp(&prin->keylist[old],
3041 				    &vlun->svl_prin.keylist[j],
3042 				    sizeof (mhioc_resv_key_t)) == 0) {
3043 					match = 1;
3044 					break;
3045 				}
3046 			}
3047 			if (!match) {
3048 				vlun->svl_prin.keylist[new] =
3049 				    prin->keylist[old];
3050 				new++;
3051 			}
3052 		}
3053 
3054 		/* Stored Big Endian */
3055 		vlun->svl_prin.generation = prin->generation;
3056 		svl_prin_length = new * sizeof (mhioc_resv_key_t);
3057 		/* Stored Big Endian */
3058 		vlun->svl_prin.length = BE_32(svl_prin_length);
3059 		svl_prin_length += VHCI_PRIN_HEADER_SZ;
3060 
3061 		/*
3062 		 * If we arrived at this point after issuing a retry, make sure
3063 		 * that we put everything back the way it originally was so
3064 		 * that the target driver can complete the command correctly.
3065 		 */
3066 		if (vpkt->vpkt_org_vpkt != NULL) {
3067 			new_bp = vpkt->vpkt_tgt_init_bp;
3068 
3069 			scsi_free_consistent_buf(new_bp);
3070 
3071 			vpkt = vhci_sync_retry_pkt(vpkt);
3072 			*intr_vpkt = vpkt;
3073 
3074 			/*
3075 			 * Make sure the original buffer is mapped into kernel
3076 			 * space before we try to copy the filtered keys into
3077 			 * it.
3078 			 */
3079 			prin = (vhci_prin_readkeys_t *)bp_mapin_common(
3080 			    vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
3081 		}
3082 
3083 		/*
3084 		 * Now copy the desired number of prin keys into the original
3085 		 * target buffer.
3086 		 */
3087 		if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) {
3088 			/*
3089 			 * It is safe to return all of the available unique
3090 			 * keys
3091 			 */
3092 			bcopy(&vlun->svl_prin, prin, svl_prin_length);
3093 		} else {
3094 			/*
3095 			 * Not all of the available keys were requested by the
3096 			 * original command.
3097 			 */
3098 			bcopy(&vlun->svl_prin, prin,
3099 			    vpkt->vpkt_tgt_init_bp->b_bcount);
3100 		}
3101 #ifdef DEBUG
3102 		VHCI_DEBUG(5, (CE_NOTE, NULL,
3103 		    "vhci_do_prin: To Application:\n"));
3104 		if (vhci_debug == 5)
3105 			vhci_print_prin_keys(prin, new);
3106 		VHCI_DEBUG(5, (CE_NOTE, NULL,
3107 		    "vhci_do_prin: MPxIO new keys:\n"));
3108 		if (vhci_debug == 5)
3109 			vhci_print_prin_keys(&vlun->svl_prin, new);
3110 #endif
3111 	}
3112 fail:
3113 	if (rval == VHCI_CMD_ERROR) {
3114 		/*
3115 		 * If we arrived at this point after issuing a
3116 		 * retry, make sure that we put everything back
3117 		 * the way it originally was so that ssd can
3118 		 * complete the command correctly.
3119 		 */
3120 
3121 		if (vpkt->vpkt_org_vpkt != NULL) {
3122 			new_bp = vpkt->vpkt_tgt_init_bp;
3123 			if (new_bp != NULL) {
3124 				scsi_free_consistent_buf(new_bp);
3125 			}
3126 
3127 			new_vpkt = vpkt;
3128 			vpkt = vpkt->vpkt_org_vpkt;
3129 
3130 			vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3131 			    new_vpkt->vpkt_tgt_pkt);
3132 		}
3133 
3134 		/*
3135 		 * Mark this command completion as having an error so that
3136 		 * ssd will retry the command.
3137 		 */
3138 
3139 		vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3140 		vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3141 
3142 		rval = VHCI_CMD_CMPLT;
3143 	}
3144 exit:
3145 	/*
3146 	 * Make sure that the semaphore is only released once.
3147 	 */
3148 	if (rval == VHCI_CMD_CMPLT) {
3149 		sema_v(&vlun->svl_pgr_sema);
3150 	}
3151 
3152 	return (rval);
3153 }
3154 
3155 static void
3156 vhci_intr(struct scsi_pkt *pkt)
3157 {
3158 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
3159 	struct scsi_pkt		*tpkt;
3160 	scsi_vhci_priv_t	*svp;
3161 	scsi_vhci_lun_t		*vlun;
3162 	int			rval, held;
3163 	struct scsi_failover_ops	*fops;
3164 	uint8_t			*sns, skey, asc, ascq;
3165 	mdi_pathinfo_t		*lpath;
3166 	static char		*timeout_err = "Command Timeout";
3167 	static char		*parity_err = "Parity Error";
3168 	char			*err_str = NULL;
3169 	dev_info_t		*vdip, *cdip;
3170 	char			*cpath;
3171 
3172 	ASSERT(vpkt != NULL);
3173 	tpkt = vpkt->vpkt_tgt_pkt;
3174 	ASSERT(tpkt != NULL);
3175 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3176 	ASSERT(svp != NULL);
3177 	vlun = svp->svp_svl;
3178 	ASSERT(vlun != NULL);
3179 	lpath = vpkt->vpkt_path;
3180 
3181 	/*
3182 	 * sync up the target driver's pkt with the pkt that
3183 	 * we actually used
3184 	 */
3185 	*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
3186 	tpkt->pkt_resid = pkt->pkt_resid;
3187 	tpkt->pkt_state = pkt->pkt_state;
3188 	tpkt->pkt_statistics = pkt->pkt_statistics;
3189 	tpkt->pkt_reason = pkt->pkt_reason;
3190 
3191 	/* Return path_instance information back to the target driver. */
3192 	if (scsi_pkt_allocated_correctly(tpkt)) {
3193 		if (scsi_pkt_allocated_correctly(pkt)) {
3194 			/*
3195 			 * If both packets were correctly allocated,
3196 			 * return path returned by pHCI.
3197 			 */
3198 			tpkt->pkt_path_instance = pkt->pkt_path_instance;
3199 		} else {
3200 			/* Otherwise return path of pHCI we used */
3201 			tpkt->pkt_path_instance =
3202 			    mdi_pi_get_path_instance(lpath);
3203 		}
3204 	}
3205 
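	/*
	 * If a PROUT register or a PRIN fails, the completion paths that
	 * normally release svl_pgr_sema (vhci_run_cmd / vhci_do_prin) are
	 * never reached, so release the semaphore here.
	 */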
3206 	if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3207 	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3208 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3209 		if ((SCBP_C(pkt) != STATUS_GOOD) ||
3210 		    (pkt->pkt_reason != CMD_CMPLT)) {
3211 			sema_v(&vlun->svl_pgr_sema);
3212 		}
3213 	} else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3214 		if (pkt->pkt_reason != CMD_CMPLT ||
3215 		    (SCBP_C(pkt) != STATUS_GOOD)) {
3216 			sema_v(&vlun->svl_pgr_sema);
3217 		}
3218 	}
3219 
3220 	switch (pkt->pkt_reason) {
3221 	case CMD_CMPLT:
3222 		/*
3223 		 * cmd completed successfully, check for scsi errors
3224 		 */
3225 		switch (*(pkt->pkt_scbp)) {
3226 		case STATUS_CHECK:
3227 			if (pkt->pkt_state & STATE_ARQ_DONE) {
3228 				sns = (uint8_t *)
3229 				    &(((struct scsi_arq_status *)(uintptr_t)
3230 				    (pkt->pkt_scbp))->sts_sensedata);
3231 				skey = scsi_sense_key(sns);
3232 				asc = scsi_sense_asc(sns);
3233 				ascq = scsi_sense_ascq(sns);
3234 				fops = vlun->svl_fops;
3235 				ASSERT(fops != NULL);
3236 				VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3237 				    "Received sns key %x  esc %x  escq %x\n",
3238 				    skey, asc, ascq));
3239 
3240 				if (vlun->svl_waiting_for_activepath == 1) {
3241 					/*
3242 					 * if we are here it means we are
3243 					 * in the midst of a probe/attach
3244 					 * through a passive path; this
3245 					 * case is exempt from sense analysis
3246 					 * for detection of ext. failover
3247 					 * because that would unnecessarily
3248 					 * increase attach time.
3249 					 */
3250 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3251 					    vpkt->vpkt_tgt_init_scblen);
3252 					break;
3253 				}
3254 				if (asc == VHCI_SCSI_PERR) {
3255 					/*
3256 					 * parity error
3257 					 */
3258 					err_str = parity_err;
3259 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3260 					    vpkt->vpkt_tgt_init_scblen);
3261 					break;
3262 				}
3263 				rval = fops->sfo_analyze_sense(svp->svp_psd,
3264 				    sns, vlun->svl_fops_ctpriv);
3265 				if ((rval == SCSI_SENSE_NOFAILOVER) ||
3266 				    (rval == SCSI_SENSE_UNKNOWN) ||
3267 				    (rval == SCSI_SENSE_NOT_READY)) {
3268 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3269 					    vpkt->vpkt_tgt_init_scblen);
3270 					break;
3271 				} else if (rval == SCSI_SENSE_STATE_CHANGED) {
3272 					struct scsi_vhci	*vhci;
3273 					vhci = ADDR2VHCI(&tpkt->pkt_address);
3274 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3275 					if (!held) {
3276 						/*
3277 						 * looks like some other thread
3278 						 * has already detected this
3279 						 * condition
3280 						 */
3281 						tpkt->pkt_state &=
3282 						    ~STATE_ARQ_DONE;
3283 						*(tpkt->pkt_scbp) =
3284 						    STATUS_BUSY;
3285 						break;
3286 					}
3287 					(void) taskq_dispatch(
3288 					    vhci->vhci_update_pathstates_taskq,
3289 					    vhci_update_pathstates,
3290 					    (void *)vlun, KM_SLEEP);
3291 				} else {
3292 					/*
3293 					 * externally initiated failover
3294 					 * has occurred or is in progress
3295 					 */
3296 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3297 					if (!held) {
3298 						/*
3299 						 * looks like some other thread
3300 						 * has already detected this
3301 						 * condition
3302 						 */
3303 						tpkt->pkt_state &=
3304 						    ~STATE_ARQ_DONE;
3305 						*(tpkt->pkt_scbp) =
3306 						    STATUS_BUSY;
3307 						break;
3308 					} else {
3309 						rval = vhci_handle_ext_fo
3310 						    (pkt, rval);
3311 						if (rval == BUSY_RETURN) {
3312 							tpkt->pkt_state &=
3313 							    ~STATE_ARQ_DONE;
3314 							*(tpkt->pkt_scbp) =
3315 							    STATUS_BUSY;
3316 							break;
3317 						}
3318 						bcopy(pkt->pkt_scbp,
3319 						    tpkt->pkt_scbp,
3320 						    vpkt->vpkt_tgt_init_scblen);
3321 						break;
3322 					}
3323 				}
3324 			}
3325 			break;
3326 
3327 		/*
3328 		 * If this is a good SCSI-II RELEASE cmd completion then restore
3329 		 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
3330 		 * If this is a good SCSI-II RESERVE cmd completion then set
3331 		 * VLUN_RESERVE_ACTIVE_FLG.
3332 		 */
3333 		case STATUS_GOOD:
3334 			if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3335 			    (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3336 				(void) mdi_set_lb_policy(vlun->svl_dip,
3337 				    vlun->svl_lb_policy_save);
3338 				vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3339 				VHCI_DEBUG(1, (CE_WARN, NULL,
3340 				    "!vhci_intr: vlun 0x%p release path 0x%p",
3341 				    (void *)vlun, (void *)vpkt->vpkt_path));
3342 			}
3343 
3344 			if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3345 			    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3346 				vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3347 				vlun->svl_resrv_pip = vpkt->vpkt_path;
3348 				VHCI_DEBUG(1, (CE_WARN, NULL,
3349 				    "!vhci_intr: vlun 0x%p reserved path 0x%p",
3350 				    (void *)vlun, (void *)vpkt->vpkt_path));
3351 			}
3352 			break;
3353 
3354 		case STATUS_RESERVATION_CONFLICT:
3355 			VHCI_DEBUG(1, (CE_WARN, NULL,
3356 			    "!vhci_intr: vlun 0x%p "
3357 			    "reserve conflict on path 0x%p",
3358 			    (void *)vlun, (void *)vpkt->vpkt_path));
3359 			/* FALLTHROUGH */
3360 		default:
3361 			break;
3362 		}
3363 
3364 		/*
3365 		 * Update I/O completion statistics for the path
3366 		 */
3367 		mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3368 
3369 		/*
3370 		 * Command completed successfully, release the dma binding and
3371 		 * destroy the transport side of the packet.
3372 		 */
3373 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3374 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3375 		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3376 			if (SCBP_C(pkt) == STATUS_GOOD) {
3377 				ASSERT(vlun->svl_taskq);
3378 				svp->svp_last_pkt_reason = pkt->pkt_reason;
3379 				(void) taskq_dispatch(vlun->svl_taskq,
3380 				    vhci_run_cmd, pkt, KM_SLEEP);
3381 				return;
3382 			}
3383 		}
3384 		if ((SCBP_C(pkt) == STATUS_GOOD) &&
3385 		    (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) {
3386 			/*
3387 			 * If the action (value in byte 1 of the cdb) is zero,
3388 			 * we're reading keys, and that's the only condition
3389 			 * where we need to be concerned with filtering keys
3390 			 * and potential retries.  Otherwise, we simply signal
3391 			 * the semaphore and move on.
3392 			 */
3393 			if (pkt->pkt_cdbp[1] == 0) {
3394 				/*
3395 				 * If this is the completion of an internal
3396 				 * retry then we need to make sure that the
3397 				 * pkt and tpkt pointers are readjusted so
3398 				 * the calls to scsi_destroy_pkt and pkt_comp
3399 				 * below work correctly.
3400 				 */
3401 				if (vpkt->vpkt_org_vpkt != NULL) {
3402 					pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3403 					tpkt = vpkt->vpkt_org_vpkt->
3404 					    vpkt_tgt_pkt;
3405 
3406 					/*
3407 					 * If this command was issued through
3408 					 * the taskq then we need to clear
3409 					 * this flag for proper processing in
3410 					 * the case of a retry from the target
3411 					 * driver.
3412 					 */
3413 					vpkt->vpkt_state &=
3414 					    ~VHCI_PKT_THRU_TASKQ;
3415 				}
3416 
3417 				/*
3418 				 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3419 				 * vpkt will contain the address of the
3420 				 * original vpkt
3421 				 */
3422 				if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3423 					/*
3424 					 * The command has been resent to get
3425 					 * all the keys from the device.  Don't
3426 					 * complete the command with ssd until
3427 					 * the retry completes.
3428 					 */
3429 					return;
3430 				}
3431 			} else {
3432 				sema_v(&vlun->svl_pgr_sema);
3433 			}
3434 		}
3435 
3436 		break;
3437 
3438 	case CMD_TIMEOUT:
3439 		if ((pkt->pkt_statistics &
3440 		    (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) == 0) {
3441 
3442 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3443 			    "!scsi vhci timeout invoked\n"));
3444 
3445 			(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3446 			    FALSE, VHCI_DEPTH_ALL);
3447 		}
3448 		MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3449 		tpkt->pkt_statistics |= STAT_ABORTED;
3450 		err_str = timeout_err;
3451 		break;
3452 
3453 	case CMD_TRAN_ERR:
3454 		/*
3455 		 * This status is returned if the transport has sent the cmd
3456 		 * down the link to the target and then some error occurs.
3457 		 * In the case of a SCSI-II RESERVE cmd, we don't know
3458 		 * whether the reservation has been accepted by the target
3459 		 * or not, so we need to clear the reservation.
3460 		 */
3461 		if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3462 		    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3463 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3464 			    " cmd_tran_err for scsi-2 reserve cmd\n"));
3465 			if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3466 			    TRUE, VHCI_DEPTH_TARGET)) {
3467 				VHCI_DEBUG(1, (CE_WARN, NULL,
3468 				    "!vhci_intr cmd_tran_err reset failed!"));
3469 			}
3470 		}
3471 		break;
3472 
3473 	case CMD_DEV_GONE:
3474 		/*
3475 		 * If this is the last path then report CMD_DEV_GONE to the
3476 		 * target driver, otherwise report BUSY to trigger a retry.
3477 		 */
3478 		if (vlun->svl_dip &&
3479 		    (mdi_client_get_path_count(vlun->svl_dip) <= 1)) {
3480 			struct scsi_vhci	*vhci;
3481 			vhci = ADDR2VHCI(&tpkt->pkt_address);
3482 			VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3483 			    "cmd_dev_gone on last path\n"));
3484 			(void) vhci_invalidate_mpapi_lu(vhci, vlun);
3485 			break;
3486 		}
3487 
3488 		/* Report CMD_CMPLT-with-BUSY to cause retry. */
3489 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3490 		    "cmd_dev_gone\n"));
3491 		tpkt->pkt_reason = CMD_CMPLT;
3492 		tpkt->pkt_state = STATE_GOT_BUS |
3493 		    STATE_GOT_TARGET | STATE_SENT_CMD |
3494 		    STATE_GOT_STATUS;
3495 		*(tpkt->pkt_scbp) = STATUS_BUSY;
3496 		break;
3497 
3498 	default:
3499 		break;
3500 	}
3501 
3502 	/*
3503 	 * The SCSI-II RESERVE cmd has been serviced by the lower layers; clear
3504 	 * the flag so the lun is no longer QUIESCED.
3505 	 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3506 	 * is retried, a taskq shall again be dispatched to service it.  Else
3507 	 * it may lead to a system hang if the retry is within interrupt
3508 	 * context.
3509 	 */
3510 	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3511 	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3512 		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3513 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3514 	}
3515 
3516 	/*
3517 	 * vpkt_org_vpkt should always be NULL here if the retry command
3518 	 * has been successfully processed.  If vpkt_org_vpkt != NULL at
3519 	 * this point, it is an error so restore the original vpkt and
3520 	 * return an error to the target driver so it can retry the
3521 	 * command as appropriate.
3522 	 */
3523 	if (vpkt->vpkt_org_vpkt != NULL) {
3524 		struct vhci_pkt *new_vpkt = vpkt;
3525 		vpkt = vpkt->vpkt_org_vpkt;
3526 
3527 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3528 		    new_vpkt->vpkt_tgt_pkt);
3529 
3530 		/*
3531 		 * Mark this command completion as having an error so that
3532 		 * ssd will retry the command.
3533 		 */
3534 		vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3535 		vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3536 
3537 		pkt = vpkt->vpkt_hba_pkt;
3538 		tpkt = vpkt->vpkt_tgt_pkt;
3539 	}
3540 
3541 	if ((err_str != NULL) && (pkt->pkt_reason !=
3542 	    svp->svp_last_pkt_reason)) {
3543 		cdip = vlun->svl_dip;
3544 		vdip = ddi_get_parent(cdip);
3545 		cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3546 		vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s",
3547 		    ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
3548 		    ddi_get_instance(cdip), err_str,
3549 		    mdi_pi_spathname(vpkt->vpkt_path));
3550 		kmem_free(cpath, MAXPATHLEN);
3551 	}
3552 	svp->svp_last_pkt_reason = pkt->pkt_reason;
3553 	VHCI_DECR_PATH_CMDCOUNT(svp);
3554 
3555 	/*
3556 	 * For PARTIAL_DMA, vhci should not free the path.
3557 	 * Target driver will call into vhci_scsi_dmafree or
3558 	 * destroy pkt to release this path.
3559 	 */
3560 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
3561 		scsi_destroy_pkt(pkt);
3562 		vpkt->vpkt_hba_pkt = NULL;
3563 		if (vpkt->vpkt_path) {
3564 			mdi_rele_path(vpkt->vpkt_path);
3565 			vpkt->vpkt_path = NULL;
3566 		}
3567 	}
3568 
3569 	scsi_hba_pkt_comp(tpkt);
3570 }
3571 
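/*
 * Illustrative sketch, not part of the original source: the CMD_DEV_GONE
 * arm of vhci_intr() above hides the loss of a single path by completing
 * the target packet as CMD_CMPLT with STATUS_BUSY, so the target driver
 * retries (ideally down a surviving path). Distilled to its essence, and
 * with a hypothetical helper name, the translation is:
 */
static void
vhci_example_busy_for_retry(struct scsi_pkt *tpkt)
{
	/* Pretend the command completed normally... */
	tpkt->pkt_reason = CMD_CMPLT;
	tpkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_GOT_STATUS;
	/* ...but with BUSY status, which target drivers retry. */
	*(tpkt->pkt_scbp) = STATUS_BUSY;
}
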
3572 /*
3573  * Two possibilities: (1) failover has completed,
3574  * or (2) it is in progress. Update our path states
3575  * in the former case; in the latter case,
3576  * initiate a scsi_watch request to
3577  * determine when failover completes - vlun is HELD
3578  * until failover completes. BUSY is returned to the
3579  * upper layer in both cases.
3580  */
3581 static int
3582 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat)
3583 {
3584 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
3585 	struct scsi_pkt		*tpkt;
3586 	scsi_vhci_priv_t	*svp;
3587 	scsi_vhci_lun_t		*vlun;
3588 	struct scsi_vhci	*vhci;
3589 	scsi_vhci_swarg_t	*swarg;
3590 	char			*path;
3591 
3592 	ASSERT(vpkt != NULL);
3593 	tpkt = vpkt->vpkt_tgt_pkt;
3594 	ASSERT(tpkt != NULL);
3595 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3596 	ASSERT(svp != NULL);
3597 	vlun = svp->svp_svl;
3598 	ASSERT(vlun != NULL);
3599 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3600 
3601 	vhci = ADDR2VHCI(&tpkt->pkt_address);
3602 
3603 	if (fostat == SCSI_SENSE_INACTIVE) {
3604 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover "
3605 		    "detected for %s; updating path states...\n",
3606 		    vlun->svl_lun_wwn));
3607 		/*
3608 		 * set the vlun flag to indicate to the task that the target
3609 		 * port group needs updating
3610 		 */
3611 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3612 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3613 		    vhci_update_pathstates, (void *)vlun, KM_SLEEP);
3614 	} else {
3615 		path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3616 		vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip),
3617 		    "!%s (%s%d): Waiting for externally initiated failover "
3618 		    "to complete", ddi_pathname(vlun->svl_dip, path),
3619 		    ddi_driver_name(vlun->svl_dip),
3620 		    ddi_get_instance(vlun->svl_dip));
3621 		kmem_free(path, MAXPATHLEN);
3622 		swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP);
3623 		if (swarg == NULL) {
3624 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: "
3625 			    "request packet allocation for %s failed....\n",
3626 			    vlun->svl_lun_wwn));
3627 			VHCI_RELEASE_LUN(vlun);
3628 			return (PKT_RETURN);
3629 		}
3630 		swarg->svs_svp = svp;
3631 		swarg->svs_tos = gethrtime();
3632 		swarg->svs_pi = vpkt->vpkt_path;
3633 		swarg->svs_release_lun = 0;
3634 		swarg->svs_done = 0;
3635 		/*
3636 		 * place a hold on the path...we don't want it to
3637 		 * vanish while scsi_watch is in progress
3638 		 */
3639 		mdi_hold_path(vpkt->vpkt_path);
3640 		svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
3641 		    VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3642 		    (caddr_t)swarg);
3643 	}
3644 	return (BUSY_RETURN);
3645 }
3646 
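/*
 * Illustrative sketch, not part of the original source: the external
 * failover path above arms a periodic scsi_watch poll whose callback
 * (vhci_efo_watch_cb) decides when the failover has settled. The core
 * sequence is timestamp, hold the path, then submit the watch request;
 * the helper name is hypothetical.
 */
static void
vhci_example_arm_efo_watch(scsi_vhci_priv_t *svp, mdi_pathinfo_t *pip,
    scsi_vhci_swarg_t *swarg)
{
	swarg->svs_tos = gethrtime();	/* start of the timeout window */
	mdi_hold_path(pip);		/* keep the path from vanishing */
	svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
	    VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
	    (caddr_t)swarg);
}
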
3647 /*
3648  * vhci_efo_watch_cb:
3649  *	Callback from scsi_watch request to check the failover status.
3650  *	Completion is either due to successful failover or timeout.
3651  *	Upon successful completion, vhci_update_path_states is called.
3652  *	For timeout condition, vhci_efo_done is called.
3653  *	Always returns 0 to scsi_watch to keep retrying till vhci_efo_done
3654  *	terminates this request properly in a separate thread.
3655  */
3656 
3657 static int
3658 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3659 {
3660 	struct scsi_status		*statusp = resultp->statusp;
3661 	uint8_t				*sensep = (uint8_t *)resultp->sensep;
3662 	struct scsi_pkt			*pkt = resultp->pkt;
3663 	scsi_vhci_swarg_t		*swarg;
3664 	scsi_vhci_priv_t		*svp;
3665 	scsi_vhci_lun_t			*vlun;
3666 	struct scsi_vhci		*vhci;
3667 	dev_info_t			*vdip;
3668 	int				rval, updt_paths;
3669 
3670 	swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3671 	svp = swarg->svs_svp;
3672 	if (swarg->svs_done) {
3673 		/*
3674 		 * Failover already completed or timed out.
3675 		 * Waiting for vhci_efo_done to terminate this scsi_watch.
3676 		 */
3677 		return (0);
3678 	}
3679 
3680 	ASSERT(svp != NULL);
3681 	vlun = svp->svp_svl;
3682 	ASSERT(vlun != NULL);
3683 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3684 	vlun->svl_efo_update_path = 0;
3685 	vdip = ddi_get_parent(vlun->svl_dip);
3686 	vhci = ddi_get_soft_state(vhci_softstate,
3687 	    ddi_get_instance(vdip));
3688 
3689 	updt_paths = 0;
3690 
3691 	if (pkt->pkt_reason != CMD_CMPLT) {
3692 		if ((gethrtime() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3693 			swarg->svs_release_lun = 1;
3694 			goto done;
3695 		}
3696 		return (0);
3697 	}
3698 	if (*((unsigned char *)statusp) == STATUS_CHECK) {
3699 		rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep,
3700 		    vlun->svl_fops_ctpriv);
3701 		switch (rval) {
3702 			/*
3703 			 * Only update path states if the path is definitely
3704 			 * inactive, or no failover occurred.  For all other
3705 			 * check conditions continue pinging.  An unexpected
3706 			 * check condition shouldn't cause pinging to complete
3707 			 * prematurely.
3708 			 */
3709 			case SCSI_SENSE_INACTIVE:
3710 			case SCSI_SENSE_NOFAILOVER:
3711 				updt_paths = 1;
3712 				break;
3713 			default:
3714 				if ((gethrtime() - swarg->svs_tos)
3715 				    >= VHCI_EXTFO_TIMEOUT) {
3716 					swarg->svs_release_lun = 1;
3717 					goto done;
3718 				}
3719 				return (0);
3720 		}
3721 	} else if (*((unsigned char *)statusp) ==
3722 	    STATUS_RESERVATION_CONFLICT) {
3723 		updt_paths = 1;
3724 	} else if ((*((unsigned char *)statusp)) &
3725 	    (STATUS_BUSY | STATUS_QFULL)) {
3726 		return (0);
3727 	}
3728 	if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3729 	    (updt_paths == 1)) {
3730 		/*
3731 		 * we got here because we had detected an
3732 		 * externally initiated failover; things
3733 		 * have settled down now, so let's
3734 		 * start up a task to update the
3735 		 * path states and target port group
3736 		 */
3737 		vlun->svl_efo_update_path = 1;
3738 		swarg->svs_done = 1;
3739 		vlun->svl_swarg = swarg;
3740 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3741 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3742 		    vhci_update_pathstates, (void *)vlun,
3743 		    KM_SLEEP);
3744 		return (0);
3745 	}
3746 	if ((gethrtime() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3747 		swarg->svs_release_lun = 1;
3748 		goto done;
3749 	}
3750 	return (0);
3751 done:
3752 	swarg->svs_done = 1;
3753 	(void) taskq_dispatch(vhci->vhci_taskq,
3754 	    vhci_efo_done, (void *)swarg, KM_SLEEP);
3755 	return (0);
3756 }
3757 
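/*
 * Illustrative sketch, not part of the original source: every non-final
 * exit from vhci_efo_watch_cb() above applies the same deadline test,
 * comparing the hrtime elapsed since the watch was armed against
 * VHCI_EXTFO_TIMEOUT. Factored into a hypothetical predicate:
 */
static boolean_t
vhci_example_efo_timed_out(const scsi_vhci_swarg_t *swarg)
{
	return ((gethrtime() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT);
}
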
3758 /*
3759  * vhci_efo_done:
3760  *	Cleanly terminates scsi_watch and frees up resources.
3761  *	Called as taskq function in vhci_efo_watch_cb for EFO timeout condition
3762  *	or by vhci_update_path_states invoked during externally initiated
3763  *	failover completion.
3764  */
3765 static void
3766 vhci_efo_done(void *arg)
3767 {
3768 	scsi_vhci_lun_t			*vlun;
3769 	scsi_vhci_swarg_t		*swarg = (scsi_vhci_swarg_t *)arg;
3770 	scsi_vhci_priv_t		*svp = swarg->svs_svp;
3771 	ASSERT(svp);
3772 
3773 	vlun = svp->svp_svl;
3774 	ASSERT(vlun);
3775 
3776 	/* Wait for clean termination of scsi_watch */
3777 	(void) scsi_watch_request_terminate(svp->svp_sw_token,
3778 	    SCSI_WATCH_TERMINATE_ALL_WAIT);
3779 	svp->svp_sw_token = NULL;
3780 
3781 	/* release path and free up resources to indicate failover completion */
3782 	mdi_rele_path(swarg->svs_pi);
3783 	if (swarg->svs_release_lun) {
3784 		VHCI_RELEASE_LUN(vlun);
3785 	}
3786 	kmem_free((void *)swarg, sizeof (*swarg));
3787 }
3788 
3789 /*
3790  * Update the path states
3791  * vlun should be HELD when this is invoked.
3792  * Calls vhci_efo_done to cleanup resources allocated for EFO.
3793  */
3794 void
3795 vhci_update_pathstates(void *arg)
3796 {
3797 	mdi_pathinfo_t			*pip, *npip;
3798 	dev_info_t			*dip;
3799 	struct scsi_failover_ops	*fo;
3800 	struct scsi_vhci_priv		*svp;
3801 	struct scsi_device		*psd;
3802 	struct scsi_path_opinfo		opinfo;
3803 	char				*pclass, *tptr;
3804 	struct scsi_vhci_lun		*vlun = (struct scsi_vhci_lun *)arg;
3805 	int				sps; /* mdi_select_path() status */
3806 	char				*cpath;
3807 	struct scsi_vhci		*vhci;
3808 	struct scsi_pkt			*pkt;
3809 	struct buf			*bp;
3810 	struct scsi_vhci_priv		*svp_conflict = NULL;
3811 
3812 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3813 	dip  = vlun->svl_dip;
3814 	pip = npip = NULL;
3815 
3816 	vhci = ddi_get_soft_state(vhci_softstate,
3817 	    ddi_get_instance(ddi_get_parent(dip)));
3818 
3819 	sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3820 	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
3821 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3822 		goto done;
3823 	}
3824 
3825 	fo = vlun->svl_fops;
3826 	do {
3827 		pip = npip;
3828 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3829 		psd = svp->svp_psd;
3830 		if (fo->sfo_path_get_opinfo(psd, &opinfo,
3831 		    vlun->svl_fops_ctpriv) != 0) {
3832 			sps = mdi_select_path(dip, NULL,
3833 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3834 			    MDI_SELECT_NO_PREFERRED), pip, &npip);
3835 			mdi_rele_path(pip);
3836 			continue;
3837 		}
3838 
3839 		if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3840 		    MDI_SUCCESS) {
3841 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3842 			    "!vhci_update_pathstates: prop lookup failed for "
3843 			    "path 0x%p\n", (void *)pip));
3844 			sps = mdi_select_path(dip, NULL,
3845 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3846 			    MDI_SELECT_NO_PREFERRED), pip, &npip);
3847 			mdi_rele_path(pip);
3848 			continue;
3849 		}
3850 
3851 		/*
3852 		 * Need to update the "path-class" property
3853 		 * value in the device tree if different
3854 		 * from the existing value.
3855 		 */
3856 		if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3857 			(void) mdi_prop_update_string(pip, "path-class",
3858 			    opinfo.opinfo_path_attr);
3859 		}
3860 
3861 		/*
3862 		 * Only change the state if needed, i.e. don't call
3863 		 * mdi_pi_set_state to ONLINE a path if it's already
3864 		 * ONLINE. Same for STANDBY paths.
3865 		 */
3866 
3867 		if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE ||
3868 		    opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) {
3869 			if (!(MDI_PI_IS_ONLINE(pip))) {
3870 				VHCI_DEBUG(1, (CE_NOTE, NULL,
3871 				    "!vhci_update_pathstates: marking path"
3872 				    " 0x%p as ONLINE\n", (void *)pip));
3873 				cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3874 				vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3875 				    "(%s%d): path %s "
3876 				    "is now ONLINE because of "
3877 				    "an externally initiated failover",
3878 				    ddi_pathname(dip, cpath),
3879 				    ddi_driver_name(dip),
3880 				    ddi_get_instance(dip),
3881 				    mdi_pi_spathname(pip));
3882 				kmem_free(cpath, MAXPATHLEN);
3883 				mdi_pi_set_state(pip,
3884 				    MDI_PATHINFO_STATE_ONLINE);
3885 				mdi_pi_set_preferred(pip,
3886 				    opinfo.opinfo_preferred);
3887 				tptr = kmem_alloc(strlen
3888 				    (opinfo.opinfo_path_attr) + 1, KM_SLEEP);
3889 				(void) strlcpy(tptr, opinfo.opinfo_path_attr,
3890 				    (strlen(opinfo.opinfo_path_attr) + 1));
3891 				mutex_enter(&vlun->svl_mutex);
3892 				if (vlun->svl_active_pclass != NULL) {
3893 					kmem_free(vlun->svl_active_pclass,
3894 					    strlen(vlun->svl_active_pclass) +
3895 					    1);
3896 				}
3897 				vlun->svl_active_pclass = tptr;
3898 				if (vlun->svl_waiting_for_activepath) {
3899 					vlun->svl_waiting_for_activepath = 0;
3900 				}
3901 				mutex_exit(&vlun->svl_mutex);
3902 			} else if (MDI_PI_IS_ONLINE(pip)) {
3903 				if (strcmp(pclass, opinfo.opinfo_path_attr)
3904 				    != 0) {
3905 					mdi_pi_set_preferred(pip,
3906 					    opinfo.opinfo_preferred);
3907 					mutex_enter(&vlun->svl_mutex);
3908 					if (vlun->svl_active_pclass == NULL ||
3909 					    strcmp(opinfo.opinfo_path_attr,
3910 					    vlun->svl_active_pclass) != 0) {
3911 						mutex_exit(&vlun->svl_mutex);
3912 						tptr = kmem_alloc(strlen
3913 						    (opinfo.opinfo_path_attr) +
3914 						    1, KM_SLEEP);
3915 						(void) strlcpy(tptr,
3916 						    opinfo.opinfo_path_attr,
3917 						    (strlen
3918 						    (opinfo.opinfo_path_attr)
3919 						    + 1));
3920 						mutex_enter(&vlun->svl_mutex);
3921 					} else {
3922 						/*
3923 						 * No need to update
3924 						 * svl_active_pclass
3925 						 */
3926 						tptr = NULL;
3927 						mutex_exit(&vlun->svl_mutex);
3928 					}
3929 					if (tptr) {
3930 						if (vlun->svl_active_pclass
3931 						    != NULL) {
3932 							kmem_free(vlun->
3933 							    svl_active_pclass,
3934 							    strlen(vlun->
3935 							    svl_active_pclass)
3936 							    + 1);
3937 						}
3938 						vlun->svl_active_pclass = tptr;
3939 						mutex_exit(&vlun->svl_mutex);
3940 					}
3941 				}
3942 			}
3943 
3944 			/* Check for Reservation Conflict */
3945 			bp = scsi_alloc_consistent_buf(
3946 			    &svp->svp_psd->sd_address, (struct buf *)NULL,
3947 			    DEV_BSIZE, B_READ, NULL, NULL);
3948 			if (!bp) {
3949 				VHCI_DEBUG(1, (CE_NOTE, NULL,
3950 				    "!vhci_update_pathstates: No resources "
3951 				    "(buf)\n"));
3952 				mdi_rele_path(pip);
3953 				goto done;
3954 			}
3955 			pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
3956 			    CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
3957 			    PKT_CONSISTENT, NULL, NULL);
3958 			if (pkt) {
3959 				(void) scsi_setup_cdb((union scsi_cdb *)
3960 				    (uintptr_t)pkt->pkt_cdbp, SCMD_READ, 1, 1,
3961 				    0);
3962 				pkt->pkt_time = 3 * 30;
3963 				pkt->pkt_flags = FLAG_NOINTR;
3964 				pkt->pkt_path_instance =
3965 				    mdi_pi_get_path_instance(pip);
3966 
3967 				if ((scsi_transport(pkt) == TRAN_ACCEPT) &&
3968 				    (pkt->pkt_reason == CMD_CMPLT) &&
3969 				    (SCBP_C(pkt) ==
3970 				    STATUS_RESERVATION_CONFLICT)) {
3971 					VHCI_DEBUG(1, (CE_NOTE, NULL,
3972 					    "!vhci_update_pathstates: reserv. "
3973 					    "conflict to be resolved on 0x%p\n",
3974 					    (void *)pip));
3975 					svp_conflict = svp;
3976 				}
3977 				scsi_destroy_pkt(pkt);
3978 			}
3979 			scsi_free_consistent_buf(bp);
3980 		} else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) &&
3981 		    !(MDI_PI_IS_STANDBY(pip))) {
3982 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3983 			    "!vhci_update_pathstates: marking path"
3984 			    " 0x%p as STANDBY\n", (void *)pip));
3985 			cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3986 			vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3987 			    "(%s%d): path %s "
3988 			    "is now STANDBY because of "
3989 			    "an externally initiated failover",
3990 			    ddi_pathname(dip, cpath),
3991 			    ddi_driver_name(dip),
3992 			    ddi_get_instance(dip),
3993 			    mdi_pi_spathname(pip));
3994 			kmem_free(cpath, MAXPATHLEN);
3995 			mdi_pi_set_state(pip,
3996 			    MDI_PATHINFO_STATE_STANDBY);
3997 			mdi_pi_set_preferred(pip,
3998 			    opinfo.opinfo_preferred);
3999 			mutex_enter(&vlun->svl_mutex);
4000 			if (vlun->svl_active_pclass != NULL) {
4001 				if (strcmp(vlun->svl_active_pclass,
4002 				    opinfo.opinfo_path_attr) == 0) {
4003 					kmem_free(vlun->
4004 					    svl_active_pclass,
4005 					    strlen(vlun->
4006 					    svl_active_pclass) + 1);
4007 					vlun->svl_active_pclass = NULL;
4008 				}
4009 			}
4010 			mutex_exit(&vlun->svl_mutex);
4011 		}
4012 		(void) mdi_prop_free(pclass);
4013 		sps = mdi_select_path(dip, NULL,
4014 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
4015 		    MDI_SELECT_NO_PREFERRED), pip, &npip);
4016 		mdi_rele_path(pip);
4017 
4018 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
4019 
4020 	/*
4021 	 * Check to see if this vlun has an active SCSI-II RESERVE.  If so,
4022 	 * clear the reservation by sending a reset, so the host doesn't
4023 	 * receive a reservation conflict.  The reset has to be sent via a
4024 	 * working path.  Let's use a path referred to by svp_conflict as it
4025 	 * should be working.
4026 	 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun.  Also notify ssd
4027 	 * of the reset, explicitly.
4028 	 */
4029 	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4030 		if (svp_conflict && (vlun->svl_xlf_capable == 0)) {
4031 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathstates:"
4032 			    " sending recovery reset on 0x%p, path_state: %x",
4033 			    svp_conflict->svp_psd->sd_private,
4034 			    mdi_pi_get_state((mdi_pathinfo_t *)
4035 			    svp_conflict->svp_psd->sd_private)));
4036 
4037 			(void) vhci_recovery_reset(vlun,
4038 			    &svp_conflict->svp_psd->sd_address, FALSE,
4039 			    VHCI_DEPTH_TARGET);
4040 		}
4041 		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
4042 		mutex_enter(&vhci->vhci_mutex);
4043 		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
4044 		    &vhci->vhci_reset_notify_listf);
4045 		mutex_exit(&vhci->vhci_mutex);
4046 	}
4047 	if (vlun->svl_flags & VLUN_UPDATE_TPG) {
4048 		/*
4049 		 * Update the AccessState of related MP-API TPGs
4050 		 */
4051 		(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
4052 		vlun->svl_flags &= ~VLUN_UPDATE_TPG;
4053 	}
4054 done:
4055 	if (vlun->svl_efo_update_path) {
4056 		vlun->svl_efo_update_path = 0;
4057 		vhci_efo_done(vlun->svl_swarg);
4058 		vlun->svl_swarg = 0;
4059 	}
4060 	VHCI_RELEASE_LUN(vlun);
4061 }
4062 
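/*
 * Illustrative sketch, not part of the original source: the path walk in
 * vhci_update_pathstates() above uses the standard MDI iteration idiom --
 * seed mdi_select_path() with a NULL cursor, then feed each returned pip
 * back in to fetch its successor, releasing the hold on each pip as the
 * walk advances. Stripped of the per-path work:
 */
static void
vhci_example_walk_paths(dev_info_t *cdip)
{
	mdi_pathinfo_t	*pip, *npip = NULL;
	int		sps;

	sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
	while ((npip != NULL) && (sps == MDI_SUCCESS)) {
		pip = npip;
		/* ... examine or update this path here ... */
		sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
		    MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED),
		    pip, &npip);
		mdi_rele_path(pip);
	}
}
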
4063 /* ARGSUSED */
4064 static int
4065 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4066 {
4067 	scsi_hba_tran_t		*hba = NULL;
4068 	struct scsi_device	*psd = NULL;
4069 	scsi_vhci_lun_t		*vlun = NULL;
4070 	dev_info_t		*pdip = NULL;
4071 	dev_info_t		*tgt_dip;
4072 	struct scsi_vhci	*vhci;
4073 	char			*guid;
4074 	scsi_vhci_priv_t	*svp = NULL;
4075 	int			rval = MDI_FAILURE;
4076 	int			vlun_alloced = 0;
4077 
4078 	ASSERT(vdip != NULL);
4079 	ASSERT(pip != NULL);
4080 
4081 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4082 	ASSERT(vhci != NULL);
4083 
4084 	pdip = mdi_pi_get_phci(pip);
4085 	ASSERT(pdip != NULL);
4086 
4087 	hba = ddi_get_driver_private(pdip);
4088 	ASSERT(hba != NULL);
4089 
4090 	tgt_dip = mdi_pi_get_client(pip);
4091 	ASSERT(tgt_dip != NULL);
4092 
4093 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
4094 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
4095 		VHCI_DEBUG(1, (CE_WARN, NULL,
4096 		    "vhci_pathinfo_init: lun guid property failed"));
4097 		goto failure;
4098 	}
4099 
4100 	vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced);
4101 	ddi_prop_free(guid);
4102 
4103 	vlun->svl_dip = tgt_dip;
4104 
4105 	svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
4106 	svp->svp_svl = vlun;
4107 
4108 	/*
4109 	 * Initialize svl_lb_policy_save only for a newly allocated vlun. Writing
4110 	 * to svl_lb_policy_save later could accidentally overwrite the saved lb
4111 	 * policy.
4112 	 */
4113 	if (vlun_alloced) {
4114 		vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
4115 	}
4116 
4117 	mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL);
4118 	cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL);
4119 
4120 	psd = kmem_zalloc(sizeof (*psd), KM_SLEEP);
4121 	mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL);
4122 
4123 	if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4124 		/*
4125 		 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to
4126 		 * scsi_device in the scsi_address structure.  This allows
4127 		 * an HBA driver to find its scsi_device(9S) and
4128 		 * per-scsi_device(9S) HBA private data given a
4129 		 * scsi_address(9S) by using scsi_address_device(9F) and
4130 		 * scsi_device_hba_private_get(9F).
4131 		 */
4132 		psd->sd_address.a.a_sd = psd;
4133 	} else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4134 		/*
4135 		 * Clone the transport structure if requested;
4136 		 * self-enumerating HBAs always need to use cloning.
4137 		 */
4138 		scsi_hba_tran_t	*clone =
4139 		    kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP);
4140 		bcopy(hba, clone, sizeof (scsi_hba_tran_t));
4141 		hba = clone;
4142 		hba->tran_sd = psd;
4143 	} else {
4144 		/*
4145 		 * SPI pHCI unit-address. If we ever need to support this
4146 		 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo
4147 		 * node unit-address properties.  For now we fail...
4148 		 */
4149 		goto failure;
4150 	}
4151 
4152 	psd->sd_dev = tgt_dip;
4153 	psd->sd_address.a_hba_tran = hba;
4154 
4155 	/*
4156 	 * Mark scsi_device as being associated with a pathinfo node. For
4157 	 * a scsi_device structure associated with a devinfo node,
4158 	 * scsi_ctlops_initchild sets this field to NULL.
4159 	 */
4160 	psd->sd_pathinfo = pip;
4161 
4162 	/*
4163 	 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with
4164 	 * too much scsi_vhci/mdi/ndi knowledge. Remove this code when all
4165 	 * mpxio-capable pHCI drivers use SCSA enumeration services (or at
4166 	 * least have been changed to use sd_pathinfo instead).
4167 	 */
4168 	psd->sd_private = (caddr_t)pip;
4169 
4170 	/* See scsi_hba.c for info on sd_tran_safe kludge */
4171 	psd->sd_tran_safe = hba;
4172 
4173 	svp->svp_psd = psd;
4174 	mdi_pi_set_vhci_private(pip, (caddr_t)svp);
4175 
4176 	/*
4177 	 * call hba's target init entry point if it exists
4178 	 */
4179 	if (hba->tran_tgt_init != NULL) {
4180 		psd->sd_tran_tgt_free_done = 0;
4181 		if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip,
4182 		    hba, psd)) != DDI_SUCCESS) {
4183 			VHCI_DEBUG(1, (CE_WARN, pdip,
4184 			    "!vhci_pathinfo_init: tran_tgt_init failed for "
4185 			    "path=0x%p rval=%x", (void *)pip, rval));
4186 			goto failure;
4187 		}
4188 	}
4189 
4190 	svp->svp_new_path = 1;
4191 
4192 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n",
4193 	    (void *)pip));
4194 	return (MDI_SUCCESS);
4195 
4196 failure:
4197 	if (psd) {
4198 		mutex_destroy(&psd->sd_mutex);
4199 		kmem_free(psd, sizeof (*psd));
4200 	}
4201 	if (svp) {
4202 		mdi_pi_set_vhci_private(pip, NULL);
4203 		mutex_destroy(&svp->svp_mutex);
4204 		cv_destroy(&svp->svp_cv);
4205 		kmem_free(svp, sizeof (*svp));
4206 	}
4207 	if (hba && (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE))
4208 		kmem_free(hba, sizeof (scsi_hba_tran_t));
4209 
4210 	if (vlun_alloced)
4211 		vhci_lun_free(vlun, NULL);
4212 
4213 	return (rval);
4214 }
4215 
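/*
 * Illustrative sketch, not part of the original source: for a
 * SCSI_HBA_ADDR_COMPLEX transport, the a.a_sd back-pointer wired up in
 * vhci_pathinfo_init() above is what lets an HBA driver recover its
 * scsi_device(9S), and from it the per-device HBA private data, given
 * only a scsi_address(9S) -- for example inside tran_start. A minimal
 * consumer, with a hypothetical name:
 */
static void *
example_hba_private_from_address(struct scsi_address *ap)
{
	struct scsi_device *sd = scsi_address_device(ap);

	return (scsi_device_hba_private_get(sd));
}
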
4216 /* ARGSUSED */
4217 static int
4218 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4219 {
4220 	scsi_hba_tran_t		*hba = NULL;
4221 	struct scsi_device	*psd = NULL;
4222 	dev_info_t		*pdip = NULL;
4223 	dev_info_t		*cdip = NULL;
4224 	scsi_vhci_priv_t	*svp = NULL;
4225 
4226 	ASSERT(vdip != NULL);
4227 	ASSERT(pip != NULL);
4228 
4229 	pdip = mdi_pi_get_phci(pip);
4230 	ASSERT(pdip != NULL);
4231 
4232 	cdip = mdi_pi_get_client(pip);
4233 	ASSERT(cdip != NULL);
4234 
4235 	hba = ddi_get_driver_private(pdip);
4236 	ASSERT(hba != NULL);
4237 
4238 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_UNINIT);
4239 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4240 	if (svp == NULL) {
4241 		/* path already freed. Nothing to do. */
4242 		return (MDI_SUCCESS);
4243 	}
4244 
4245 	psd = svp->svp_psd;
4246 	ASSERT(psd != NULL);
4247 
4248 	if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4249 		/* Verify plumbing */
4250 		ASSERT(psd->sd_address.a_hba_tran == hba);
4251 		ASSERT(psd->sd_address.a.a_sd == psd);
4252 	} else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4253 		/* Switch to cloned scsi_hba_tran(9S) structure */
4254 		hba = psd->sd_address.a_hba_tran;
4255 		ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE);
4256 		ASSERT(hba->tran_sd == psd);
4257 	}
4258 
4259 	if ((hba->tran_tgt_free != NULL) && !psd->sd_tran_tgt_free_done) {
4260 		(*hba->tran_tgt_free) (pdip, cdip, hba, psd);
4261 		psd->sd_tran_tgt_free_done = 1;
4262 	}
4263 	mutex_destroy(&psd->sd_mutex);
4264 	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4265 		kmem_free(hba, sizeof (*hba));
4266 	}
4267 
4268 	mdi_pi_set_vhci_private(pip, NULL);
4269 
4270 	/*
4271 	 * Free the pathinfo related scsi_device inquiry data. Note that this
4272 	 * matches what happens for scsi_hba.c devinfo case at uninitchild time.
4273 	 */
4274 	if (psd->sd_inq)
4275 		kmem_free((caddr_t)psd->sd_inq, sizeof (struct scsi_inquiry));
4276 	kmem_free((caddr_t)psd, sizeof (*psd));
4277 
4278 	mutex_destroy(&svp->svp_mutex);
4279 	cv_destroy(&svp->svp_cv);
4280 	kmem_free((caddr_t)svp, sizeof (*svp));
4281 
4282 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n",
4283 	    (void *)pip));
4284 	return (MDI_SUCCESS);
4285 }
4286 
4287 /* ARGSUSED */
4288 static int
4289 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip,
4290     mdi_pathinfo_state_t state, uint32_t ext_state, int flags)
4291 {
4292 	int			rval = MDI_SUCCESS;
4293 	scsi_vhci_priv_t	*svp;
4294 	scsi_vhci_lun_t		*vlun;
4295 	int			held;
4296 	int			op = (flags & 0xf00) >> 8;
4297 	struct scsi_vhci	*vhci;
4298 
4299 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4300 
4301 	if (flags & MDI_EXT_STATE_CHANGE) {
4302 		/*
4303 		 * We do not want to issue any commands down the path in case
4304 		 * the sync flag is set. Lower layers might not be ready to accept
4305 		 * any I/O commands.
4306 		 */
4307 		if (op == DRIVER_DISABLE)
4308 			return (MDI_SUCCESS);
4309 
4310 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4311 		if (svp == NULL) {
4312 			return (MDI_FAILURE);
4313 		}
4314 		vlun = svp->svp_svl;
4315 
4316 		if (flags & MDI_BEFORE_STATE_CHANGE) {
4317 			/*
4318 			 * Hold the LUN.
4319 			 */
4320 			VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
4321 			if (flags & MDI_DISABLE_OP)  {
4322 				/*
4323 				 * Issue scsi reset if it happens to be
4324 				 * reserved path.
4325 				 */
4326 				if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4327 					/*
4328 					 * if a reservation is pending on
4329 					 * this path, don't mark the
4330 					 * path busy
4331 					 */
4332 					if (op == DRIVER_DISABLE_TRANSIENT) {
4333 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4334 						    "!vhci_pathinfo"
4335 						    "_state_change (pip:%p): "
4336 						    " reservation: fail busy\n",
4337 						    (void *)pip));
4338 						return (MDI_FAILURE);
4339 					}
4340 					if (pip == vlun->svl_resrv_pip) {
4341 						if (vhci_recovery_reset(
4342 						    svp->svp_svl,
4343 						    &svp->svp_psd->sd_address,
4344 						    TRUE,
4345 						    VHCI_DEPTH_TARGET) == 0) {
4346 							VHCI_DEBUG(1,
4347 							    (CE_NOTE, NULL,
4348 							    "!vhci_pathinfo"
4349 							    "_state_change "
4350 							    " (pip:%p): "
4351 							    "reset failed, "
4352 							    "give up!\n",
4353 							    (void *)pip));
4354 						}
4355 						vlun->svl_flags &=
4356 						    ~VLUN_RESERVE_ACTIVE_FLG;
4357 					}
4358 				}
4359 			} else if (flags & MDI_ENABLE_OP)  {
4360 				if (((vhci->vhci_conf_flags &
4361 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4362 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4363 				    MDI_PI_IS_USER_DISABLE(pip) &&
4364 				    MDI_PI_IS_STANDBY(pip)) {
4365 					struct scsi_failover_ops	*fo;
4366 					char *best_pclass, *pclass = NULL;
4367 					int  best_class, rv;
4368 					/*
4369 					 * Failback if enabling a standby path
4370 					 * and it is the primary class or
4371 					 * preferred class
4372 					 */
4373 					best_class = mdi_pi_get_preferred(pip);
4374 					if (best_class == 0) {
4375 						/*
4376 						 * if not preferred - compare
4377 						 * path-class with class
4378 						 */
4379 						fo = vlun->svl_fops;
4380 						(void) fo->sfo_pathclass_next(
4381 						    NULL, &best_pclass,
4382 						    vlun->svl_fops_ctpriv);
4383 						pclass = NULL;
4384 						rv = mdi_prop_lookup_string(pip,
4385 						    "path-class", &pclass);
4386 						if (rv != MDI_SUCCESS ||
4387 						    pclass == NULL) {
4388 							vhci_log(CE_NOTE, vdip,
4389 							    "!path-class "
4390 							    " lookup "
4391 							    "failed. rv: %d"
4392 							    "class: %p", rv,
4393 							    (void *)pclass);
4394 						} else if (strncmp(pclass,
4395 						    best_pclass,
4396 						    strlen(best_pclass)) == 0) {
4397 							best_class = 1;
4398 						}
4399 						if (rv == MDI_SUCCESS &&
4400 						    pclass != NULL) {
4401 							rv = mdi_prop_free(
4402 							    pclass);
4403 							if (rv !=
4404 							    DDI_PROP_SUCCESS) {
4405 								vhci_log(
4406 								    CE_NOTE,
4407 								    vdip,
4408 								    "!path-"
4409 								    "class"
4410 								    " free"
4411 								    " failed"
4412 								    " rv: %d"
4413 								    " class: "
4414 								    "%p",
4415 								    rv,
4416 								    (void *)
4417 								    pclass);
4418 							}
4419 						}
4420 					}
4421 					if (best_class == 1) {
4422 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4423 						    "preferred path: %p "
4424 						    "USER_DISABLE->USER_ENABLE "
4425 						    "transition for lun %s\n",
4426 						    (void *)pip,
4427 						    vlun->svl_lun_wwn));
4428 						(void) taskq_dispatch(
4429 						    vhci->vhci_taskq,
4430 						    vhci_initiate_auto_failback,
4431 						    (void *) vlun, KM_SLEEP);
4432 					}
4433 				}
4434 				/*
4435 				 * if PGR is active, revalidate key and
4436 				 * register on this path also, if key is
4437 				 * still valid
4438 				 */
4439 				sema_p(&vlun->svl_pgr_sema);
4440 				if (vlun->svl_pgr_active)
4441 					(void)
4442 					    vhci_pgr_validate_and_register(svp);
4443 				sema_v(&vlun->svl_pgr_sema);
4444 				/*
4445 				 * Inform target driver about any
4446 				 * reservations to be reinstated if target
4447 				 * has dropped reservation during the busy
4448 				 * period.
4449 				 */
4450 				mutex_enter(&vhci->vhci_mutex);
4451 				scsi_hba_reset_notify_callback(
4452 				    &vhci->vhci_mutex,
4453 				    &vhci->vhci_reset_notify_listf);
4454 				mutex_exit(&vhci->vhci_mutex);
4455 			}
4456 		}
4457 		if (flags & MDI_AFTER_STATE_CHANGE) {
4458 			if (flags & MDI_ENABLE_OP)  {
4459 				mutex_enter(&vhci_global_mutex);
4460 				cv_broadcast(&vhci_cv);
4461 				mutex_exit(&vhci_global_mutex);
4462 			}
4463 			if (vlun->svl_setcap_done) {
4464 				(void) vhci_pHCI_cap(&svp->svp_psd->sd_address,
4465 				    "sector-size", vlun->svl_sector_size,
4466 				    1, pip);
4467 			}
4468 
4469 			/*
4470 			 * Release the LUN
4471 			 */
4472 			VHCI_RELEASE_LUN(vlun);
4473 
4474 			/*
4475 			 * Path transition is complete.
4476 			 * Run callback to indicate target driver to
4477 			 * retry to prevent IO starvation.
4478 			 */
4479 			if (scsi_callback_id != 0) {
4480 				ddi_run_callback(&scsi_callback_id);
4481 			}
4482 		}
4483 	} else {
4484 		switch (state) {
4485 		case MDI_PATHINFO_STATE_ONLINE:
4486 			rval = vhci_pathinfo_online(vdip, pip, flags);
4487 			break;
4488 
4489 		case MDI_PATHINFO_STATE_OFFLINE:
4490 			rval = vhci_pathinfo_offline(vdip, pip, flags);
4491 			break;
4492 
4493 		default:
4494 			break;
4495 		}
4496 		/*
4497 		 * Path transition is complete.
4498 		 * Run callback to indicate target driver to
4499 		 * retry to prevent IO starvation.
4500 		 */
4501 		if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) {
4502 			ddi_run_callback(&scsi_callback_id);
4503 		}
4504 		return (rval);
4505 	}
4506 
4507 	return (MDI_SUCCESS);
4508 }
4509 
4510 /*
4511  * Parse the mpxio load balancing options. The datanameptr
4512  * will point to a string containing the load-balance-options value.
4513  * The load-balance-options value will be a property that
4514  * defines the load-balance algorithm and any arguments to that
4515  * algorithm.
4516  * For example:
4517  * device-type-mpxio-options-list=
4518  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4519  * "device-type=SUN     SE6920", "round-robin-options";
4520  * logical-block-options="load-balance=logical-block", "region-size=15";
4521  * round-robin-options="load-balance=round-robin";
4522  *
4523  * If the load-balance is not defined, the load-balance algorithm will
4524  * default to the global setting. There will be default values assigned
4525  * to the arguments (region-size=18) and if an argument is one
4526  * that is not known, it will be ignored.
4527  */
4528 static void
4529 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip,
4530     caddr_t datanameptr)
4531 {
4532 	char			*dataptr, *next_entry;
4533 	caddr_t			config_list	= NULL;
4534 	int			config_list_len = 0, list_len = 0;
4535 	int			region_size = -1;
4536 	client_lb_t		load_balance;
4537 	client_lb_t		load_balance = LOAD_BALANCE_NONE;
4538 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr,
4539 	    (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) {
4540 		return;
4541 	}
4542 
4543 	list_len = config_list_len;
4544 	next_entry = config_list;
4545 	while (config_list_len > 0) {
4546 		dataptr = next_entry;
4547 
4548 		if (strncmp(mdi_load_balance, dataptr,
4549 		    strlen(mdi_load_balance)) == 0) {
4550 			/* get the load-balance scheme */
4551 			dataptr += strlen(mdi_load_balance) + 1;
4552 			if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) {
4553 				(void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
4554 				load_balance = LOAD_BALANCE_RR;
4555 			} else if (strcmp(dataptr,
4556 			    LOAD_BALANCE_PROP_LBA) == 0) {
4557 				(void) mdi_set_lb_policy(cdip,
4558 				    LOAD_BALANCE_LBA);
4559 				load_balance = LOAD_BALANCE_LBA;
4560 			} else if (strcmp(dataptr,
4561 			    LOAD_BALANCE_PROP_NONE) == 0) {
4562 				(void) mdi_set_lb_policy(cdip,
4563 				    LOAD_BALANCE_NONE);
4564 				load_balance = LOAD_BALANCE_NONE;
4565 			}
4566 		} else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE,
4567 		    strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
4568 			int	i = 0;
4569 			char	*ptr;
4570 			char	*tmp;
4571 
4572 			tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1);
4573 			/* check for numeric value */
4574 			for (ptr = tmp; i < strlen(tmp); i++, ptr++) {
4575 				if (!isdigit(*ptr)) {
4576 					cmn_err(CE_WARN,
4577 					    "Illegal region size: %s."
4578 					    " Setting to default value: %d",
4579 					    tmp,
4580 					    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4581 					region_size =
4582 					    LOAD_BALANCE_DEFAULT_REGION_SIZE;
4583 					break;
4584 				}
4585 			}
4586 			if (i >= strlen(tmp)) {
4587 				region_size = stoi(&tmp);
4588 			}
4589 			(void) mdi_set_lb_region_size(cdip, region_size);
4590 		}
4591 		config_list_len -= (strlen(next_entry) + 1);
4592 		next_entry += strlen(next_entry) + 1;
4593 	}
4594 #ifdef DEBUG
4595 	if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) {
4596 		VHCI_DEBUG(1, (CE_NOTE, dip,
4597 		    "!vhci_parse_mpxio_lb_options: region-size: %d"
4598 		    "only valid for load-balance=logical-block\n",
4599 		    region_size));
4600 	}
4601 #endif
4602 	if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) {
4603 		VHCI_DEBUG(1, (CE_NOTE, dip,
4604 		    "!vhci_parse_mpxio_lb_options: No region-size"
4605 		    " defined load-balance=logical-block."
4606 		    " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
4607 		(void) mdi_set_lb_region_size(cdip,
4608 		    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4609 	}
4610 	if (list_len > 0) {
4611 		kmem_free(config_list, list_len);
4612 	}
4613 }
4614 
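/*
 * Illustrative sketch, not part of the original source: the parser above
 * walks a packed property value -- a buffer of consecutive NUL-terminated
 * strings whose total length comes back from ddi_getlongprop(). The
 * traversal idiom, with hypothetical names, is simply:
 */
static void
example_walk_string_list(char *entry, int list_len)
{
	while (list_len > 0) {
		/* ... match this "name=value" entry against known keys ... */
		list_len -= strlen(entry) + 1;
		entry += strlen(entry) + 1;
	}
}
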
4615 /*
4616  * Parse the device-type-mpxio-options-list looking for the key of
4617  * "load-balance-options". If found, parse the load balancing options.
4618  * See the comment on vhci_get_device_type_mpxio_options()
4619  * for the format of the device-type-mpxio-options-list.
4620  */
4621 static void
4622 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4623     caddr_t datanameptr, int list_len)
4624 {
4625 	char		*dataptr;
4626 	int		len;
4627 
4628 	/*
4629 	 * get the data list
4630 	 */
4631 	dataptr = datanameptr;
4632 	len = 0;
4633 	while (len < list_len &&
4634 	    strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
4635 	    != 0) {
4636 		if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
4637 		    strlen(LOAD_BALANCE_OPTIONS)) == 0) {
4638 			len += strlen(LOAD_BALANCE_OPTIONS) + 1;
4639 			dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
4640 			vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
4641 		}
4642 		len += strlen(dataptr) + 1;
4643 		dataptr += strlen(dataptr) + 1;
4644 	}
4645 }
4646 
4647 /*
4648  * Check the inquiry string returned from the device against the device-type.
4649  * Check for the existence of the device-type-mpxio-options-list and
4650  * if found parse the list checking for a match with the device-type
4651  * value and the inquiry string returned from the device. If a match
4652  * is found, parse the mpxio options list. The format of the
4653  * device-type-mpxio-options-list is:
4654  * device-type-mpxio-options-list=
4655  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4656  * "device-type=SUN     SE6920", "round-robin-options";
4657  * logical-block-options="load-balance=logical-block", "region-size=15";
4658  * round-robin-options="load-balance=round-robin";
4659  */
4660 void
4661 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4662     struct scsi_device *devp)
4663 {
4664 
4665 	caddr_t			config_list	= NULL;
4666 	caddr_t			vidptr, datanameptr;
4667 	int			vidlen, dupletlen = 0;
4668 	int			config_list_len = 0, len;
4669 	struct scsi_inquiry	*inq = devp->sd_inq;
4670 
4671 	/*
4672 	 * Look up the device-type-mpxio-options-list and walk through
4673 	 * the list, comparing the vendor id from the earlier inquiry command
4674 	 * with the vids in the list; if there is a match, look up
4675 	 * the mpxio-options value.
4676 	 */
4677 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
4678 	    MPXIO_OPTIONS_LIST,
4679 	    (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {
4680 
4681 		/*
4682 		 * Compare vids in each duplet - if it matches,
4683 		 * parse the mpxio options list.
4684 		 */
4685 		for (len = config_list_len, vidptr = config_list; len > 0;
4686 		    len -= dupletlen) {
4687 
4688 			dupletlen = 0;
4689 
4690 			if (strlen(vidptr) != 0 &&
4691 			    strncmp(vidptr, DEVICE_TYPE_STR,
4692 			    strlen(DEVICE_TYPE_STR)) == 0) {
4693 				/* point to next duplet */
4694 				datanameptr = vidptr + strlen(vidptr) + 1;
4695 				/* add len of this duplet */
4696 				dupletlen += strlen(vidptr) + 1;
4697 				/* get to device type */
4698 				vidptr += strlen(DEVICE_TYPE_STR) + 1;
4699 				vidlen = strlen(vidptr);
4700 				if ((vidlen != 0) &&
4701 				    bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
4702 					vhci_parse_mpxio_options(dip, cdip,
4703 					    datanameptr, len - dupletlen);
4704 					break;
4705 				}
4706 				/* get to next duplet */
4707 				vidptr += strlen(vidptr) + 1;
4708 			}
4709 			/* get to the next device-type */
4710 			while (len - dupletlen > 0 &&
4711 			    strlen(vidptr) != 0 &&
4712 			    strncmp(vidptr, DEVICE_TYPE_STR,
4713 			    strlen(DEVICE_TYPE_STR)) != 0) {
4714 				dupletlen += strlen(vidptr) + 1;
4715 				vidptr += strlen(vidptr) + 1;
4716 			}
4717 		}
4718 		if (config_list_len > 0) {
4719 			kmem_free(config_list, config_list_len);
4720 		}
4721 	}
4722 }
4723 
4724 static int
4725 vhci_update_pathinfo(struct scsi_device *psd,  mdi_pathinfo_t *pip,
4726     struct scsi_failover_ops *fo, scsi_vhci_lun_t *vlun,
4727     struct scsi_vhci *vhci)
4728 {
4729 	struct scsi_path_opinfo		opinfo;
4730 	char				*pclass, *best_pclass;
4731 	char				*resrv_pclass = NULL;
4732 	int				force_rereserve = 0;
4733 	int				update_pathinfo_done = 0;
4734 
4735 	if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) {
4736 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
4737 		    "Failed to get operation info for path:%p\n", (void *)pip));
4738 		return (MDI_FAILURE);
4739 	}
4740 	/* set the xlf capable flag in the vlun for future use */
4741 	vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
4742 	(void) mdi_prop_update_string(pip, "path-class",
4743 	    opinfo.opinfo_path_attr);
4744 
4745 	pclass = opinfo.opinfo_path_attr;
4746 	if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
4747 		mutex_enter(&vlun->svl_mutex);
4748 		if (vlun->svl_active_pclass != NULL) {
4749 			if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
4750 				mutex_exit(&vlun->svl_mutex);
4751 				/*
4752 				 * Externally initiated failover has happened;
4753 				 * force the path state to be STANDBY/ONLINE,
4754 				 * next IO will trigger failover and thus
4755 				 * sync-up the pathstates.  The reason we don't
4756 				 * sync-up immediately by invoking
4757 				 * vhci_update_pathstates() is because it
4758 				 * needs a VHCI_HOLD_LUN() and we don't
4759 				 * want to block here.
4760 				 *
4761 				 * Further, if the device is an ALUA device,
4762 				 * then failure to exactly match 'pclass' and
4763 				 * 'svl_active_pclass'(as is the case here)
4764 				 * indicates that the currently active path
4765 				 * is a 'non-optimized' path - which means
4766 				 * that 'svl_active_pclass' needs to be
4767 				 * replaced with the opinfo.opinfo_path_attr
4768 				 * value.
4769 				 */
4770 
4771 				if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
4772 					char	*tptr;
4773 
4774 					/*
4775 					 * The device is ALUA compliant. The
4776 					 * state need to be changed to online
4777 					 * state needs to be changed to online
4778 					 * rather than standby, which is what
4779 					 * is typically done for an asymmetric
4780 					 * device that is not ALUA compliant.
4781 					mdi_pi_set_state(pip,
4782 					    MDI_PATHINFO_STATE_ONLINE);
4783 					tptr = kmem_alloc(strlen
4784 					    (opinfo.opinfo_path_attr) + 1,
4785 					    KM_SLEEP);
4786 					(void) strlcpy(tptr,
4787 					    opinfo.opinfo_path_attr,
4788 					    (strlen(opinfo.opinfo_path_attr)
4789 					    + 1));
4790 					mutex_enter(&vlun->svl_mutex);
4791 					kmem_free(vlun->svl_active_pclass,
4792 					    strlen(vlun->svl_active_pclass) +
4793 					    1);
4794 					vlun->svl_active_pclass = tptr;
4795 					mutex_exit(&vlun->svl_mutex);
4796 				} else {
4797 					/*
4798 					 * Non ALUA device case.
4799 					 */
4800 					mdi_pi_set_state(pip,
4801 					    MDI_PATHINFO_STATE_STANDBY);
4802 				}
4803 				vlun->svl_fo_support = opinfo.opinfo_mode;
4804 				mdi_pi_set_preferred(pip,
4805 				    opinfo.opinfo_preferred);
4806 				update_pathinfo_done = 1;
4807 			}
4808 
4809 			/*
4810 			 * Find out the class of the currently reserved path,
4811 			 * if there is any.
4812 			 */
4813 			if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) &&
4814 			    mdi_prop_lookup_string(vlun->svl_resrv_pip,
4815 			    "path-class", &resrv_pclass) != MDI_SUCCESS) {
4816 				VHCI_DEBUG(1, (CE_NOTE, NULL,
4817 				    "!vhci_update_pathinfo: prop lookup "
4818 				    "failed for path 0x%p\n",
4819 				    (void *)vlun->svl_resrv_pip));
4820 				/*
4821 				 * Something is wrong with the reserved path.
4822 				 * We can't do much with that right here. Just
4823 				 * force re-reservation to another path.
4824 				 */
4825 				force_rereserve = 1;
4826 			}
4827 
4828 			(void) fo->sfo_pathclass_next(NULL, &best_pclass,
4829 			    vlun->svl_fops_ctpriv);
4830 			if ((force_rereserve == 1) || ((resrv_pclass != NULL) &&
4831 			    (strcmp(pclass, best_pclass) == 0) &&
4832 			    (strcmp(resrv_pclass, best_pclass) != 0))) {
4833 				/*
4834 				 * Inform target driver that a reservation
4835 				 * should be reinstated because the reserved
4836 				 * path is not the most preferred one.
4837 				 */
4838 				mutex_enter(&vhci->vhci_mutex);
4839 				scsi_hba_reset_notify_callback(
4840 				    &vhci->vhci_mutex,
4841 				    &vhci->vhci_reset_notify_listf);
4842 				mutex_exit(&vhci->vhci_mutex);
4843 			}
4844 
4845 			if (update_pathinfo_done == 1) {
4846 				return (MDI_SUCCESS);
4847 			}
4848 		} else {
4849 			char	*tptr;
4850 
4851 			/*
4852 			 * Let's release the mutex before we try to
4853 			 * allocate, since the allocation may
4854 			 * sleep.
4855 			 */
4856 			mutex_exit(&vlun->svl_mutex);
4857 			tptr = kmem_alloc(strlen(pclass) + 1, KM_SLEEP);
4858 			(void) strlcpy(tptr, pclass, (strlen(pclass) + 1));
4859 			mutex_enter(&vlun->svl_mutex);
4860 			vlun->svl_active_pclass = tptr;
4861 		}
4862 		mutex_exit(&vlun->svl_mutex);
4863 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4864 		vlun->svl_waiting_for_activepath = 0;
4865 	} else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
4866 		mutex_enter(&vlun->svl_mutex);
4867 		if (vlun->svl_active_pclass == NULL) {
4868 			char	*tptr;
4869 
4870 			mutex_exit(&vlun->svl_mutex);
4871 			tptr = kmem_alloc(strlen(pclass) + 1, KM_SLEEP);
4872 			(void) strlcpy(tptr, pclass, (strlen(pclass) + 1));
4873 			mutex_enter(&vlun->svl_mutex);
4874 			vlun->svl_active_pclass = tptr;
4875 		}
4876 		mutex_exit(&vlun->svl_mutex);
4877 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4878 		vlun->svl_waiting_for_activepath = 0;
4879 	} else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
4880 		mutex_enter(&vlun->svl_mutex);
4881 		if (vlun->svl_active_pclass != NULL) {
4882 			if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
4883 				mutex_exit(&vlun->svl_mutex);
4884 				/*
4885 				 * externally initiated failover has happened;
4886 				 * force state to ONLINE (see comment above)
4887 				 */
4888 				mdi_pi_set_state(pip,
4889 				    MDI_PATHINFO_STATE_ONLINE);
4890 				vlun->svl_fo_support = opinfo.opinfo_mode;
4891 				mdi_pi_set_preferred(pip,
4892 				    opinfo.opinfo_preferred);
4893 				return (MDI_SUCCESS);
4894 			}
4895 		}
4896 		mutex_exit(&vlun->svl_mutex);
4897 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);
4898 
4899 		/*
4900 		 * Initiate auto-failback, if enabled, for path if path-state
4901 		 * is transitioning from OFFLINE->STANDBY and pathclass is the
4902 		 * preferred pathclass for this storage.
4903 		 * NOTE: In case where opinfo_path_state is SCSI_PATH_ACTIVE
4904 		 * (above), where the pi state is set to STANDBY, we don't
4905 		 * initiate auto-failback, as the next IO shall take care of
4906 		 * this. See comment above.
4907 		 */
4908 		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
4909 		    vlun->svl_fops_ctpriv);
4910 		if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4911 		    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4912 		    (strcmp(pclass, best_pclass) == 0) &&
4913 		    ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE) ||
4914 		    (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
4915 			VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
4916 			    " OFFLINE->STANDBY transition for lun %s\n",
4917 			    best_pclass, (void *)pip, vlun->svl_lun_wwn));
4918 			(void) taskq_dispatch(vhci->vhci_taskq,
4919 			    vhci_initiate_auto_failback, (void *) vlun,
4920 			    KM_SLEEP);
4921 		}
4922 	}
4923 	vlun->svl_fo_support = opinfo.opinfo_mode;
4924 	mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);
4925 
4926 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
4927 	    " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
4928 	    opinfo.opinfo_rev, opinfo.opinfo_path_state,
4929 	    opinfo.opinfo_preferred, opinfo.opinfo_mode));
4930 
4931 	return (MDI_SUCCESS);
4932 }
4933 
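/*
 * Illustrative sketch, not part of the original source: the auto-failback
 * decision at the end of vhci_update_pathinfo() above reduces to three
 * tests -- the feature is enabled in vhci_conf_flags, the path belongs to
 * the preferred class, and the path is coming up from OFFLINE or INIT.
 * As a hypothetical predicate:
 */
static int
vhci_example_should_failback(struct scsi_vhci *vhci, mdi_pathinfo_t *pip,
    char *pclass, char *best_pclass)
{
	return (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
	    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
	    (strcmp(pclass, best_pclass) == 0) &&
	    ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE) ||
	    (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT)));
}
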
4934 /*
4935  * Form the kstat name and call mdi_pi_kstat_create()
4936  */
4937 void
4938 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
4939 {
4940 	dev_info_t	*tgt_dip;
4941 	dev_info_t	*pdip;
4942 	char		*guid;
4943 	char		*target_port, *target_port_dup;
4944 	char		ks_name[KSTAT_STRLEN];
4945 	uint_t		pid;
4946 	int		by_id;
4947 	mod_hash_val_t	hv;
4948 
4949 
4950 	/* return if we have already allocated kstats */
4951 	if (mdi_pi_kstat_exists(pip))
4952 		return;
4953 
4954 	/*
4955 	 * We need instance numbers to create a kstat name, return if we don't
4956 	 * have instance numbers assigned yet.
4957 	 */
4958 	tgt_dip = mdi_pi_get_client(pip);
4959 	pdip = mdi_pi_get_phci(pip);
4960 	if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
4961 		return;
4962 
4963 	/*
4964 	 * A path oriented kstat has a ks_name of the form:
4965 	 *
4966 	 * <client-driver><instance>.t<pid>.<pHCI-driver><instance>
4967 	 *
4968 	 * We maintain a bidirectional 'target-port' to <pid> map,
4969 	 * called targetmap. All pathinfo nodes with the same
4970 	 * 'target-port' map to the same <pid>. The iostat(8) code,
4971 	 * when parsing a path oriented kstat name, uses the <pid> as
4972 	 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
4973 	 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
4974 	 * this ioctl needs to translate a <pid> to a 'target-port'
4975 	 * even after all pathinfo nodes associated with the
4976 	 * 'target-port' have been destroyed. This is needed to support
4977 	 * consistent first-iteration activity-since-boot iostat(8)
4978 	 * output. Because of this requirement, the mapping can't be
4979 	 * based on pathinfo information in a devinfo snapshot.
4980 	 */
4981 
4982 	/* determine 'target-port' */
4983 	if (mdi_prop_lookup_string(pip,
4984 	    SCSI_ADDR_PROP_TARGET_PORT, &target_port) == MDI_SUCCESS) {
4985 		target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
4986 		(void) mdi_prop_free(target_port);
4987 		by_id = 1;
4988 	} else {
4989 		/*
4990 		 * If the pHCI did not set up 'target-port' on this
4991 		 * pathinfo node, assume that our client is the only
4992 		 * one with paths to the device by using the guid
4993 		 * value as the 'target-port'. Since no other client
4994 		 * will have the same guid, no other client will use
4995 		 * the same <pid>.  NOTE: a client with an instance
4996 		 * number always has a guid.
4997 		 */
4998 		(void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
4999 		    PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid);
5000 		target_port_dup = i_ddi_strdup(guid, KM_SLEEP);
5001 		ddi_prop_free(guid);
5002 
5003 		/*
5004 		 * For this type of mapping we don't want the
5005 		 * <id> -> 'target-port' mapping to be made.  This
5006 		 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
5007 		 * to fail, and the iostat(8) long '-n' output will
5008 		 * still use the <pid>.  We do this because we just
5009 		 * made up the 'target-port' using the guid, and we
5010 		 * don't want to expose that fact in iostat output.
5011 		 */
5012 		by_id = 0;
5013 	}
5014 
5015 	/* find/establish <pid> given 'target-port' */
5016 	mutex_enter(&vhci_targetmap_mutex);
5017 	if (mod_hash_find(vhci_targetmap_byport,
5018 	    (mod_hash_key_t)target_port_dup, &hv) == 0) {
5019 		pid = (int)(intptr_t)hv;	/* mapping exists */
5020 	} else {
5021 		pid = vhci_targetmap_pid++;	/* new mapping */
5022 
5023 		(void) mod_hash_insert(vhci_targetmap_byport,
5024 		    (mod_hash_key_t)target_port_dup,
5025 		    (mod_hash_val_t)(intptr_t)pid);
5026 		if (by_id) {
5027 			(void) mod_hash_insert(vhci_targetmap_bypid,
5028 			    (mod_hash_key_t)(uintptr_t)pid,
5029 			    (mod_hash_val_t)(uintptr_t)target_port_dup);
5030 		}
5031 		target_port_dup = NULL;		/* owned by hash */
5032 	}
5033 	mutex_exit(&vhci_targetmap_mutex);
5034 
5035 	/* form kstat name */
5036 	(void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
5037 	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip),
5038 	    pid, ddi_driver_name(pdip), ddi_get_instance(pdip));
5039 
5040 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p "
5041 	    "kstat %s: pid %x <-> port %s\n", (void *)pip,
5042 	    ks_name, pid, target_port_dup));
5043 	if (target_port_dup)
5044 		kmem_free(target_port_dup, strlen(target_port_dup) + 1);
5045 
5046 	/* call mdi to create kstats with the name we built */
5047 	(void) mdi_pi_kstat_create(pip, ks_name);
5048 }
5049 
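/*
 * Illustrative sketch, not part of the original source: the targetmap
 * lookup in vhci_kstat_create_pathinfo() above is a find-or-insert
 * against a mod_hash. With a hypothetical helper name (and ignoring the
 * key-ownership detail that the hash keeps the inserted string), mapping
 * a 'target-port' string to a <pid> reduces to:
 */
static uint_t
vhci_example_port_to_pid(char *target_port)
{
	mod_hash_val_t	hv;
	uint_t		pid;

	mutex_enter(&vhci_targetmap_mutex);
	if (mod_hash_find(vhci_targetmap_byport,
	    (mod_hash_key_t)target_port, &hv) == 0) {
		pid = (uint_t)(uintptr_t)hv;	/* mapping already exists */
	} else {
		pid = vhci_targetmap_pid++;	/* establish a new mapping */
		(void) mod_hash_insert(vhci_targetmap_byport,
		    (mod_hash_key_t)target_port,
		    (mod_hash_val_t)(uintptr_t)pid);
	}
	mutex_exit(&vhci_targetmap_mutex);
	return (pid);
}
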
5050 /* ARGSUSED */
5051 static int
5052 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5053 {
5054 	scsi_hba_tran_t			*hba = NULL;
5055 	struct scsi_device		*psd = NULL;
5056 	scsi_vhci_lun_t			*vlun = NULL;
5057 	dev_info_t			*pdip = NULL;
5058 	dev_info_t			*cdip;
5059 	dev_info_t			*tgt_dip;
5060 	struct scsi_vhci		*vhci;
5061 	char				*guid;
5062 	struct scsi_failover_ops	*sfo;
5063 	scsi_vhci_priv_t		*svp = NULL;
5064 	struct scsi_address		*ap;
5065 	struct scsi_pkt			*pkt;
5066 	int				rval = MDI_FAILURE;
5067 	mpapi_item_list_t		*list_ptr;
5068 	mpapi_lu_data_t			*ld;
5069 
5070 	ASSERT(vdip != NULL);
5071 	ASSERT(pip != NULL);
5072 
5073 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
5074 	ASSERT(vhci != NULL);
5075 
5076 	pdip = mdi_pi_get_phci(pip);
5077 	hba = ddi_get_driver_private(pdip);
5078 	ASSERT(hba != NULL);
5079 
5080 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5081 	ASSERT(svp != NULL);
5082 
5083 	cdip = mdi_pi_get_client(pip);
5084 	ASSERT(cdip != NULL);
5085 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
5086 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
5087 		VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid "
5088 		    "property failed"));
5089 		goto failure;
5090 	}
5091 
5092 	vlun = vhci_lun_lookup(cdip);
5093 	ASSERT(vlun != NULL);
5094 
5095 	ddi_prop_free(guid);
5096 
5097 	vlun->svl_dip = mdi_pi_get_client(pip);
5098 	ASSERT(vlun->svl_dip != NULL);
5099 
5100 	psd = svp->svp_psd;
5101 	ASSERT(psd != NULL);
5102 
5103 	ap = &psd->sd_address;
5104 
5105 	/*
5106 	 * Get inquiry data into pathinfo related scsi_device structure.
5107 	 * Free sd_inq when the pathinfo related scsi_device structure is destroyed
5108 	 * by vhci_pathinfo_uninit(). In other words, vhci maintains its own
5109 	 * copy of scsi_device and scsi_inquiry data on a per-path basis.
5110 	 */
5111 	if (scsi_probe(psd, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
5112 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
5113 		    "scsi_probe failed path:%p rval:%x\n", (void *)pip, rval));
5114 		rval = MDI_FAILURE;
5115 		goto failure;
5116 	}
5117 
5118 	/*
5119 	 * See if we have a failover module to support the device.
5120 	 *
5121 	 * We re-probe to determine the failover ops for each path. This
5122 	 * is done in case there are any path-specific side-effects associated
5123 	 * with the sfo_device_probe implementation.
5124 	 *
5125 	 * Give the first successful sfo_device_probe the opportunity to
5126 	 * establish 'ctpriv', vlun/client private data. The ctpriv will
5127 	 * then be passed into the failover module on all other sfo_device_*()
5128 	 * operations (and must be freed by sfo_device_unprobe implementation).
5129 	 *
5130 	 * NOTE: While sfo_device_probe is done once per path,
5131 	 * sfo_device_unprobe only occurs once - when the vlun is destroyed.
5132 	 *
5133 	 * NOTE: We don't currently support per-path fops private data
5134 	 * mechanism.
5135 	 */
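	/*
	 * A minimal sketch of the probe contract described above for a
	 * hypothetical failover module (names illustrative; the signature
	 * follows the sfo_device_probe member of struct scsi_failover_ops):
	 *
	 *	static int
	 *	xxx_device_probe(struct scsi_device *sd,
	 *	    struct scsi_inquiry *inq, void **ctprivp)
	 *	{
	 *		if (!xxx_supported(inq))
	 *			return (SFO_DEVICE_PROBE_PHCI);
	 *		if (*ctprivp == NULL)
	 *			*ctprivp = kmem_zalloc(
	 *			    sizeof (struct xxx_priv), KM_SLEEP);
	 *		return (SFO_DEVICE_PROBE_VHCI);
	 *	}
	 *
	 * The ctpriv established by the first successful probe is passed
	 * to later sfo_device_*() calls and released by the module's
	 * sfo_device_unprobe when the vlun is destroyed.
	 */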
5136 	sfo = vhci_dev_fo(vdip, psd,
5137 	    &vlun->svl_fops_ctpriv, &vlun->svl_fops_name);
5138 
5139 	/* check path configuration result with current vlun state */
5140 	if (((sfo && vlun->svl_fops) && (sfo != vlun->svl_fops)) ||
5141 	    (sfo && vlun->svl_not_supported) ||
5142 	    ((sfo == NULL) && vlun->svl_fops)) {
5143 		/* Getting different results for different paths. */
5144 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5145 		    "!vhci_pathinfo_online: dev (path 0x%p) contradiction\n",
5146 		    (void *)pip));
5147 		cmn_err(CE_WARN, "scsi_vhci: failover contradiction: "
5148 		    "'%s'.vs.'%s': path %s\n",
5149 		    vlun->svl_fops ? vlun->svl_fops->sfo_name : "NULL",
5150 		    sfo ? sfo->sfo_name : "NULL", mdi_pi_pathname(pip));
5151 		vlun->svl_not_supported = 1;
5152 		rval = MDI_NOT_SUPPORTED;
5153 		goto done;
5154 	} else if (sfo == NULL) {
5155 		/* No failover module - device not supported under vHCI.  */
5156 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5157 		    "!vhci_pathinfo_online: dev (path 0x%p) not "
5158 		    "supported\n", (void *)pip));
5159 
5160 		/* XXX does this contradict vhci_is_dev_supported ? */
5161 		vlun->svl_not_supported = 1;
5162 		rval = MDI_NOT_SUPPORTED;
5163 		goto done;
5164 	}
5165 
5166 	/* failover supported for device - save failover_ops in vlun */
5167 	vlun->svl_fops = sfo;
5168 	ASSERT(vlun->svl_fops_name != NULL);
5169 
5170 	/*
5171 	 * Obtain the device-type based mpxio options as specified in
5172 	 * scsi_vhci.conf file.
5173 	 *
5174 	 * NOTE: currently, the end result is a call to
5175 	 * mdi_set_lb_region_size().
5176 	 */
5177 	tgt_dip = psd->sd_dev;
5178 	ASSERT(tgt_dip != NULL);
5179 	vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);
5180 
5181 	/*
5182 	 * if PGR is active, revalidate key and register on this path also,
5183 	 * if key is still valid
5184 	 */
5185 	sema_p(&vlun->svl_pgr_sema);
5186 	if (vlun->svl_pgr_active) {
5187 		rval = vhci_pgr_validate_and_register(svp);
5188 		if (rval != 1) {
5189 			rval = MDI_FAILURE;
5190 			sema_v(&vlun->svl_pgr_sema);
5191 			goto failure;
5192 		}
5193 	}
5194 	sema_v(&vlun->svl_pgr_sema);
5195 
5196 	if (svp->svp_new_path) {
5197 		/*
5198 		 * Last chance to perform any cleanup operations on this
5199 		 * new path before making this path completely online.
5200 		 */
5201 		svp->svp_new_path = 0;
5202 
5203 		/*
5204 		 * If scsi_vhci knows the lun is already RESERVE'd,
5205 		 * then skip issuing RELEASE on the new path.
5206 		 */
5207 		if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) {
5208 			/*
5209 			 * Issue SCSI-2 RELEASE only for the first time on
5210 			 * a new path just in case the host rebooted and
5211 			 * a reservation is still pending on this path.
5212 			 * IBM Shark storage does not clear RESERVE upon
5213 			 * host reboot.
5214 			 */
5215 			pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0,
5216 			    sizeof (struct scsi_arq_status), 0, 0,
5217 			    SLEEP_FUNC, NULL);
5218 			if (pkt == NULL) {
5219 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5220 				    "!vhci_pathinfo_online: "
5221 				    "Release init_pkt failed :%p\n",
5222 				    (void *)pip));
5223 				rval = MDI_FAILURE;
5224 				goto failure;
5225 			}
5226 			pkt->pkt_cdbp[0] = SCMD_RELEASE;
5227 			pkt->pkt_time = 60;
5228 
5229 			VHCI_DEBUG(1, (CE_NOTE, NULL,
5230 			    "!vhci_path_online: path:%p "
5231 			    "Issued SCSI-2 RELEASE\n", (void *)pip));
5232 
5233 			/* Ignore the return value */
5234 			(void) vhci_do_scsi_cmd(pkt);
5235 			scsi_destroy_pkt(pkt);
5236 		}
5237 	}
5238 
5239 	rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci);
5240 	if (rval == MDI_FAILURE) {
5241 		goto failure;
5242 	}
5243 
5244 	/* Initialize MP-API data */
5245 	vhci_update_mpapi_data(vhci, vlun, pip);
5246 
5247 	/*
5248 	 * MP-API also needs the Inquiry data to be maintained in the
5249 	 * mp_vendor_prop_t structure, so find the lun and update its
5250 	 * structure with this data.
5251 	 */
5252 	list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL,
5253 	    MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
5254 	ld = (mpapi_lu_data_t *)list_ptr->item->idata;
5255 	if (ld != NULL) {
5256 		bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8);
5257 		bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16);
5258 		bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4);
5259 	} else {
5260 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: "
5261 		    "mpapi_lu_data_t is NULL"));
5262 	}
5263 
5264 	/* create kstats for path */
5265 	vhci_kstat_create_pathinfo(pip);
5266 
5267 done:
5268 	mutex_enter(&vhci_global_mutex);
5269 	cv_broadcast(&vhci_cv);
5270 	mutex_exit(&vhci_global_mutex);
5271 
5272 	if (vlun->svl_setcap_done) {
5273 		(void) vhci_pHCI_cap(ap, "sector-size",
5274 		    vlun->svl_sector_size, 1, pip);
5275 	}
5276 
5277 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n",
5278 	    (void *)pip));
5279 
5280 failure:
5281 	return (rval);
5282 }
5283 
5284 /*
5285  * path offline handler.  Release all bindings that will not be
5286  * released by the normal packet transport/completion code path.
5287  * Since we don't (presently) keep any bindings alive outside of
5288  * the in-transport packets (which will be released on completion)
5289  * there is not much to do here.
5290  */
5291 /* ARGSUSED */
5292 static int
5293 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5294 {
5295 	scsi_hba_tran_t		*hba = NULL;
5296 	struct scsi_device	*psd = NULL;
5297 	dev_info_t		*pdip = NULL;
5298 	dev_info_t		*cdip = NULL;
5299 	scsi_vhci_priv_t	*svp = NULL;
5300 
5301 	ASSERT(vdip != NULL);
5302 	ASSERT(pip != NULL);
5303 
5304 	pdip = mdi_pi_get_phci(pip);
5305 	ASSERT(pdip != NULL);
5306 	if (pdip == NULL) {
5307 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5308 		    "phci dip", (void *)pip));
5309 		return (MDI_FAILURE);
5310 	}
5311 
5312 	cdip = mdi_pi_get_client(pip);
5313 	ASSERT(cdip != NULL);
5314 	if (cdip == NULL) {
5315 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5316 		    "client dip", (void *)pip));
5317 		return (MDI_FAILURE);
5318 	}
5319 
5320 	hba = ddi_get_driver_private(pdip);
5321 	ASSERT(hba != NULL);
5322 
5323 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5324 	if (svp == NULL) {
5325 		/*
5326 		 * mdi_pathinfo node in INIT state can have vHCI private
5327 		 * information set to null
5328 		 */
5329 		VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5330 		    "svp is NULL for pip 0x%p\n", (void *)pip));
5331 		return (MDI_SUCCESS);
5332 	}
5333 
5334 	psd = svp->svp_psd;
5335 	ASSERT(psd != NULL);
5336 
5337 	mutex_enter(&svp->svp_mutex);
5338 
5339 	VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5340 	    "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
5341 	while (svp->svp_cmds != 0) {
5342 		if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
5343 		    drv_usectohz(vhci_path_quiesce_timeout * 1000000),
5344 		    TR_CLOCK_TICK) == -1) {
5345 			/*
5346 			 * The timeout expired without the condition
5347 			 * being signaled.
5348 			 */
5349 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5350 			    "Timeout reached on path 0x%p without the cond\n",
5351 			    (void *)pip));
5352 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5353 			    "%d cmds still pending on path: 0x%p\n",
5354 			    svp->svp_cmds, (void *)pip));
5355 			break;
5356 		}
5357 	}
5358 	mutex_exit(&svp->svp_mutex);
5359 
5360 	/*
5361 	 * Check whether this vlun has an active SCSI-II RESERVE and this
5362 	 * is the pip for the path holding the reservation.  If so, clear
5363 	 * the reservation by sending a reset, so the host will not get a
5364 	 * reservation conflict.  Clear the flag VLUN_RESERVE_ACTIVE_FLG
5365 	 * for this lun.  A reset notify is also sent to the target driver
5366 	 * just in case the POR check condition is cleared by some other
5367 	 * layer in the stack.
5368 	 */
5369 	if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
5370 		if (pip == svp->svp_svl->svl_resrv_pip) {
5371 			if (vhci_recovery_reset(svp->svp_svl,
5372 			    &svp->svp_psd->sd_address, TRUE,
5373 			    VHCI_DEPTH_TARGET) == 0) {
5374 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5375 				    "!vhci_pathinfo_offline (pip:%p):"
5376 				    "reset failed, retrying\n", (void *)pip));
5377 				delay(1 * drv_usectohz(1000000));
5378 				if (vhci_recovery_reset(svp->svp_svl,
5379 				    &svp->svp_psd->sd_address, TRUE,
5380 				    VHCI_DEPTH_TARGET) == 0) {
5381 					VHCI_DEBUG(1, (CE_NOTE, NULL,
5382 					    "!vhci_pathinfo_offline "
5383 					    "(pip:%p): reset failed, "
5384 					    "giving up!\n", (void *)pip));
5385 				}
5386 			}
5387 			svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
5388 		}
5389 	}
5390 
5391 	mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE);
5392 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
5393 
5394 	VHCI_DEBUG(1, (CE_NOTE, NULL,
5395 	    "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
5396 	return (MDI_SUCCESS);
5397 }
5398 
5399 
5400 /*
5401  * routine for SCSI VHCI IOCTL implementation.
5402  */
5403 /* ARGSUSED */
5404 static int
5405 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
5406 {
5407 	struct scsi_vhci		*vhci;
5408 	dev_info_t			*vdip;
5409 	mdi_pathinfo_t			*pip;
5410 	int				instance, held;
5411 	int				retval = 0;
5412 	caddr_t				phci_path = NULL, client_path = NULL;
5413 	caddr_t				paddr = NULL;
5414 	sv_iocdata_t			ioc;
5415 	sv_iocdata_t			*pioc = &ioc;
5416 	sv_switch_to_cntlr_iocdata_t	iocsc;
5417 	sv_switch_to_cntlr_iocdata_t	*piocsc = &iocsc;
5418 	caddr_t				s;
5419 	scsi_vhci_lun_t			*vlun;
5420 	struct scsi_failover_ops	*fo;
5421 	char				*pclass;
5422 
5423 	/* Check for validity of vhci structure */
5424 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
5425 	if (vhci == NULL) {
5426 		return (ENXIO);
5427 	}
5428 
5429 	mutex_enter(&vhci->vhci_mutex);
5430 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5431 		mutex_exit(&vhci->vhci_mutex);
5432 		return (ENXIO);
5433 	}
5434 	mutex_exit(&vhci->vhci_mutex);
5435 
5436 	/* Get the vhci dip */
5437 	vdip = vhci->vhci_dip;
5438 	ASSERT(vdip != NULL);
5439 	instance = ddi_get_instance(vdip);
5440 
5441 	/* Allocate memory for getting parameters from userland */
5442 	phci_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5443 	client_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5444 	paddr		= kmem_zalloc(MAXNAMELEN, KM_SLEEP);
5445 
5446 	/*
5447 	 * Set a local variable indicating the ioctl name. Used for
5448 	 * printing debug strings.
5449 	 */
5450 	switch (cmd) {
5451 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5452 		s = "GET_CLIENT_MULTIPATH_INFO";
5453 		break;
5454 
5455 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5456 		s = "GET_PHCI_MULTIPATH_INFO";
5457 		break;
5458 
5459 	case SCSI_VHCI_GET_CLIENT_NAME:
5460 		s = "GET_CLIENT_NAME";
5461 		break;
5462 
5463 	case SCSI_VHCI_PATH_ONLINE:
5464 		s = "PATH_ONLINE";
5465 		break;
5466 
5467 	case SCSI_VHCI_PATH_OFFLINE:
5468 		s = "PATH_OFFLINE";
5469 		break;
5470 
5471 	case SCSI_VHCI_PATH_STANDBY:
5472 		s = "PATH_STANDBY";
5473 		break;
5474 
5475 	case SCSI_VHCI_PATH_TEST:
5476 		s = "PATH_TEST";
5477 		break;
5478 
5479 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5480 		s = "SWITCH_TO_CNTLR";
5481 		break;
5482 	case SCSI_VHCI_PATH_DISABLE:
5483 		s = "PATH_DISABLE";
5484 		break;
5485 	case SCSI_VHCI_PATH_ENABLE:
5486 		s = "PATH_ENABLE";
5487 		break;
5488 
5489 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5490 		s = "GET_TARGET_LONGNAME";
5491 		break;
5492 
5493 #ifdef	DEBUG
5494 	case SCSI_VHCI_CONFIGURE_PHCI:
5495 		s = "CONFIGURE_PHCI";
5496 		break;
5497 
5498 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5499 		s = "UNCONFIGURE_PHCI";
5500 		break;
5501 #endif
5502 
5503 	default:
5504 		s = "Unknown";
5505 		vhci_log(CE_NOTE, vdip,
5506 		    "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd);
5507 		retval = ENOTSUP;
5508 		break;
5509 	}
5510 	if (retval != 0) {
5511 		goto end;
5512 	}
5513 
5514 	VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s));
5515 
5516 	/*
5517 	 * Get IOCTL parameters from userland
5518 	 */
5519 	switch (cmd) {
5520 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5521 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5522 	case SCSI_VHCI_GET_CLIENT_NAME:
5523 	case SCSI_VHCI_PATH_ONLINE:
5524 	case SCSI_VHCI_PATH_OFFLINE:
5525 	case SCSI_VHCI_PATH_STANDBY:
5526 	case SCSI_VHCI_PATH_TEST:
5527 	case SCSI_VHCI_PATH_DISABLE:
5528 	case SCSI_VHCI_PATH_ENABLE:
5529 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5530 #ifdef	DEBUG
5531 	case SCSI_VHCI_CONFIGURE_PHCI:
5532 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5533 #endif
5534 		retval = vhci_get_iocdata((const void *)data, pioc, mode, s);
5535 		break;
5536 
5537 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5538 		retval = vhci_get_iocswitchdata((const void *)data, piocsc,
5539 		    mode, s);
5540 		break;
5541 	}
5542 	if (retval != 0) {
5543 		goto end;
5544 	}
5545 
5546 
5547 	/*
5548 	 * Process the IOCTL
5549 	 */
5550 	switch (cmd) {
5551 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5552 	{
5553 		uint_t		num_paths;	/* Num paths to client dev */
5554 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5555 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5556 		dev_info_t	*cdip;		/* Client device dip */
5557 
5558 		if (pioc->ret_elem == NULL) {
5559 			retval = EINVAL;
5560 			break;
5561 		}
5562 
5563 		/* Get client device path from user land */
5564 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5565 			retval = EFAULT;
5566 			break;
5567 		}
5568 
5569 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5570 		    "client <%s>", s, client_path));
5571 
5572 		/* Get number of paths to this client device */
5573 		if ((cdip = mdi_client_path2devinfo(vdip, client_path))
5574 		    == NULL) {
5575 			retval = ENXIO;
5576 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5577 			    "client dip doesn't exist. invalid path <%s>",
5578 			    s, client_path));
5579 			break;
5580 		}
5581 		num_paths = mdi_client_get_path_count(cdip);
5582 
5583 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5584 		    sizeof (num_paths), mode)) {
5585 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5586 			    "num_paths copyout failed", s));
5587 			retval = EFAULT;
5588 			break;
5589 		}
5590 
5591 		/* If user just wanted num_paths, then return */
5592 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5593 		    num_paths == 0) {
5594 			break;
5595 		}
5596 
5597 		/* Limit num_paths to what can be sent to userland */
5598 		if (num_paths > pioc->buf_elem) {
5599 			num_paths = pioc->buf_elem;
5600 		}
5601 
5602 		/* Allocate memory and get userland pointers */
5603 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5604 		    pioc, mode, s) != 0) {
5605 			retval = EFAULT;
5606 			break;
5607 		}
5608 		ASSERT(upibuf != NULL);
5609 		ASSERT(kpibuf != NULL);
5610 
5611 		/*
5612 		 * Get the path information and send it to userland.
5613 		 */
5614 		if (vhci_get_client_path_list(cdip, kpibuf, num_paths)
5615 		    != MDI_SUCCESS) {
5616 			retval = ENXIO;
5617 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5618 			break;
5619 		}
5620 
5621 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5622 		    pioc, mode, s)) {
5623 			retval = EFAULT;
5624 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5625 			break;
5626 		}
5627 
5628 		/* Free the memory allocated for path information */
5629 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5630 		break;
5631 	}
5632 
5633 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5634 	{
5635 		uint_t		num_paths;	/* Num paths to client dev */
5636 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5637 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5638 		dev_info_t	*pdip;		/* PHCI device dip */
5639 
5640 		if (pioc->ret_elem == NULL) {
5641 			retval = EINVAL;
5642 			break;
5643 		}
5644 
5645 		/* Get PHCI device path from user land */
5646 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5647 			retval = EFAULT;
5648 			break;
5649 		}
5650 
5651 		VHCI_DEBUG(6, (CE_WARN, vdip,
5652 		    "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));
5653 
5654 		/* Get number of devices associated with this PHCI device */
5655 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5656 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5657 			    "phci dip doesn't exist. invalid path <%s>",
5658 			    s, phci_path));
5659 			retval = ENXIO;
5660 			break;
5661 		}
5662 
5663 		num_paths = mdi_phci_get_path_count(pdip);
5664 
5665 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5666 		    sizeof (num_paths), mode)) {
5667 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5668 			    "num_paths copyout failed", s));
5669 			retval = EFAULT;
5670 			break;
5671 		}
5672 
5673 		/* If user just wanted num_paths, then return */
5674 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5675 		    num_paths == 0) {
5676 			break;
5677 		}
5678 
5679 		/* Limit num_paths to what can be sent to userland */
5680 		if (num_paths > pioc->buf_elem) {
5681 			num_paths = pioc->buf_elem;
5682 		}
5683 
5684 		/* Allocate memory and get userland pointers */
5685 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5686 		    pioc, mode, s) != 0) {
5687 			retval = EFAULT;
5688 			break;
5689 		}
5690 		ASSERT(upibuf != NULL);
5691 		ASSERT(kpibuf != NULL);
5692 
5693 		/*
5694 		 * Get the path information and send it to userland.
5695 		 */
5696 		if (vhci_get_phci_path_list(pdip, kpibuf, num_paths)
5697 		    != MDI_SUCCESS) {
5698 			retval = ENXIO;
5699 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5700 			break;
5701 		}
5702 
5703 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5704 		    pioc, mode, s)) {
5705 			retval = EFAULT;
5706 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5707 			break;
5708 		}
5709 
5710 		/* Free the memory allocated for path information */
5711 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5712 		break;
5713 	}
5714 
5715 	case SCSI_VHCI_GET_CLIENT_NAME:
5716 	{
5717 		dev_info_t		*cdip, *pdip;
5718 
5719 		/* Get PHCI path and device address from user land */
5720 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5721 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5722 			retval = EFAULT;
5723 			break;
5724 		}
5725 
5726 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5727 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5728 
5729 		/* Get the PHCI dip */
5730 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5731 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5732 			    "phci dip doesn't exist. invalid path <%s>",
5733 			    s, phci_path));
5734 			retval = ENXIO;
5735 			break;
5736 		}
5737 
5738 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5739 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5740 			    "pathinfo doesn't exist. invalid device addr", s));
5741 			retval = ENXIO;
5742 			break;
5743 		}
5744 
5745 		/* Get the client device pathname and send to userland */
5746 		cdip = mdi_pi_get_client(pip);
5747 		vhci_ioc_devi_to_path(cdip, client_path);
5748 
5749 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5750 		    "client <%s>", s, client_path));
5751 
5752 		if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) {
5753 			retval = EFAULT;
5754 			break;
5755 		}
5756 		break;
5757 	}
5758 
5759 	case SCSI_VHCI_PATH_ONLINE:
5760 	case SCSI_VHCI_PATH_OFFLINE:
5761 	case SCSI_VHCI_PATH_STANDBY:
5762 	case SCSI_VHCI_PATH_TEST:
5763 	{
5764 		dev_info_t		*pdip;	/* PHCI dip */
5765 
5766 		/* Get PHCI path and device address from user land */
5767 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5768 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5769 			retval = EFAULT;
5770 			break;
5771 		}
5772 
5773 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5774 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5775 
5776 		/* Get the PHCI dip */
5777 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5778 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5779 			    "phci dip doesn't exist. invalid path <%s>",
5780 			    s, phci_path));
5781 			retval = ENXIO;
5782 			break;
5783 		}
5784 
5785 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5786 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5787 			    "pathinfo doesn't exist. invalid device addr", s));
5788 			retval = ENXIO;
5789 			break;
5790 		}
5791 
5792 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5793 		    "Calling MDI function to change device state", s));
5794 
5795 		switch (cmd) {
5796 		case SCSI_VHCI_PATH_ONLINE:
5797 			retval = mdi_pi_online(pip, 0);
5798 			break;
5799 
5800 		case SCSI_VHCI_PATH_OFFLINE:
5801 			retval = mdi_pi_offline(pip, 0);
5802 			break;
5803 
5804 		case SCSI_VHCI_PATH_STANDBY:
5805 			retval = mdi_pi_standby(pip, 0);
5806 			break;
5807 
5808 		case SCSI_VHCI_PATH_TEST:
5809 			break;
5810 		}
5811 		break;
5812 	}
5813 
5814 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5815 	{
5816 		dev_info_t *cdip;
5817 		struct scsi_device *devp;
5818 
5819 		/* Get the client device pathname */
5820 		if (ddi_copyin(piocsc->client, client_path,
5821 		    MAXPATHLEN, mode)) {
5822 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5823 			    "client_path copyin failed", s));
5824 			retval = EFAULT;
5825 			break;
5826 		}
5827 
5828 		/* Get the path class to which user wants to switch */
5829 		if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) {
5830 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5831 			    "controller_class copyin failed", s));
5832 			retval = EFAULT;
5833 			break;
5834 		}
5835 
5836 		/* Perform validity checks */
5837 		if ((cdip = mdi_client_path2devinfo(vdip,
5838 		    client_path)) == NULL) {
5839 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5840 			    "client dip doesn't exist. invalid path <%s>",
5841 			    s, client_path));
5842 			retval = ENXIO;
5843 			break;
5844 		}
5845 
5846 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func "
5847 		    "to switch controller"));
5848 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> "
5849 		    "class <%s>", client_path, paddr));
5850 
5851 		if (strcmp(paddr, PCLASS_PRIMARY) &&
5852 		    strcmp(paddr, PCLASS_SECONDARY)) {
5853 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5854 			    "invalid path class <%s>", s, paddr));
5855 			retval = ENXIO;
5856 			break;
5857 		}
5858 
5859 		devp = ddi_get_driver_private(cdip);
5860 		if (devp == NULL) {
5861 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5862 			    "invalid scsi device <%s>", s, client_path));
5863 			retval = ENXIO;
5864 			break;
5865 		}
5866 		vlun = ADDR2VLUN(&devp->sd_address);
5867 		ASSERT(vlun);
5868 
5869 		/*
5870 		 * Check whether the device has only one pclass, PRIMARY.
5871 		 * If so, this device doesn't support failover.  We assume
5872 		 * that a device with a single pclass has pclass PRIMARY,
5873 		 * as that's the case today.  If this changes and symmetric
5874 		 * devices are supported with another pclass, this IOCTL
5875 		 * will have to be overhauled anyway, as the only arguments
5876 		 * it accepts now are PRIMARY and SECONDARY.
5877 		 */
5878 		fo = vlun->svl_fops;
5879 		if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass,
5880 		    vlun->svl_fops_ctpriv)) {
5881 			retval = ENOTSUP;
5882 			break;
5883 		}
5884 
5885 		VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
5886 		mutex_enter(&vlun->svl_mutex);
5887 		if (vlun->svl_active_pclass != NULL) {
5888 			if (strcmp(vlun->svl_active_pclass, paddr) == 0) {
5889 				mutex_exit(&vlun->svl_mutex);
5890 				retval = EALREADY;
5891 				VHCI_RELEASE_LUN(vlun);
5892 				break;
5893 			}
5894 		}
5895 		mutex_exit(&vlun->svl_mutex);
5896 		/* Call mdi function to cause  a switch over */
5897 		retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
5898 		if (retval == MDI_SUCCESS) {
5899 			retval = 0;
5900 		} else if (retval == MDI_BUSY) {
5901 			retval = EBUSY;
5902 		} else {
5903 			retval = EIO;
5904 		}
5905 		VHCI_RELEASE_LUN(vlun);
5906 		break;
5907 	}
5908 
5909 	case SCSI_VHCI_PATH_ENABLE:
5910 	case SCSI_VHCI_PATH_DISABLE:
5911 	{
5912 		dev_info_t	*cdip, *pdip;
5913 
5914 		/*
5915 		 * Get client device path from user land
5916 		 */
5917 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5918 			retval = EFAULT;
5919 			break;
5920 		}
5921 
5922 		/*
5923 		 * Get Phci device path from user land
5924 		 */
5925 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5926 			retval = EFAULT;
5927 			break;
5928 		}
5929 
5930 		/*
5931 		 * Get the devinfo for the Phci.
5932 		 */
5933 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5934 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5935 			    "phci dip doesn't exist. invalid path <%s>",
5936 			    s, phci_path));
5937 			retval = ENXIO;
5938 			break;
5939 		}
5940 
5941 		/*
5942 		 * If the client path is set to /scsi_vhci then we need
5943 		 * to do the operation on all the clients so set cdip to NULL.
5944 		 * Else, try to get the client dip.
5945 		 */
5946 		if (strcmp(client_path, "/scsi_vhci") == 0) {
5947 			cdip = NULL;
5948 		} else {
5949 			if ((cdip = mdi_client_path2devinfo(vdip,
5950 			    client_path)) == NULL) {
5951 				retval = ENXIO;
5952 				VHCI_DEBUG(1, (CE_WARN, NULL,
5953 				    "!vhci_ioctl: ioctl <%s> client dip "
5954 				    "doesn't exist. invalid path <%s>",
5955 				    s, client_path));
5956 				break;
5957 			}
5958 		}
5959 
5960 		if (cmd == SCSI_VHCI_PATH_ENABLE)
5961 			retval = mdi_pi_enable(cdip, pdip, USER_DISABLE);
5962 		else
5963 			retval = mdi_pi_disable(cdip, pdip, USER_DISABLE);
5964 
5965 		break;
5966 	}
5967 
5968 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5969 	{
5970 		uint_t		pid = pioc->buf_elem;
5971 		char		*target_port;
5972 		mod_hash_val_t	hv;
5973 
5974 		/* targetmap lookup of 'target-port' by <pid> */
5975 		if (mod_hash_find(vhci_targetmap_bypid,
5976 		    (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) {
5977 			/*
5978 			 * NOTE: failure to find the mapping is OK for guid
5979 			 * based 'target-port' values.
5980 			 */
5981 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5982 			    "targetport mapping doesn't exist: pid %d",
5983 			    s, pid));
5984 			retval = ENXIO;
5985 			break;
5986 		}
5987 
5988 		/* copyout 'target-port' result */
5989 		target_port = (char *)hv;
5990 		if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) {
5991 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5992 			    "targetport copyout failed: len: %d",
5993 			    s, (int)strlen(target_port)));
5994 			retval = EFAULT;
5995 		}
5996 		break;
5997 	}
5998 
5999 #ifdef	DEBUG
6000 	case SCSI_VHCI_CONFIGURE_PHCI:
6001 	{
6002 		dev_info_t		*pdip;
6003 
6004 		/* Get PHCI path and device address from user land */
6005 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6006 			retval = EFAULT;
6007 			break;
6008 		}
6009 
6010 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6011 		    "phci <%s>", s, phci_path));
6012 
6013 		/* Get the PHCI dip */
6014 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6015 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6016 			    "phci dip doesn't exist. invalid path <%s>",
6017 			    s, phci_path));
6018 			retval = ENXIO;
6019 			break;
6020 		}
6021 
6022 		if (ndi_devi_config(pdip,
6023 		    NDI_DEVFS_CLEAN | NDI_DEVI_PERSIST) != NDI_SUCCESS) {
6024 			retval = EIO;
6025 		}
6026 
6027 		ddi_release_devi(pdip);
6028 		break;
6029 	}
6030 
6031 	case SCSI_VHCI_UNCONFIGURE_PHCI:
6032 	{
6033 		dev_info_t		*pdip;
6034 
6035 		/* Get PHCI path and device address from user land */
6036 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6037 			retval = EFAULT;
6038 			break;
6039 		}
6040 
6041 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6042 		    "phci <%s>", s, phci_path));
6043 
6044 		/* Get the PHCI dip */
6045 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6046 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6047 			    "phci dip doesn't exist. invalid path <%s>",
6048 			    s, phci_path));
6049 			retval = ENXIO;
6050 			break;
6051 		}
6052 
6053 		if (ndi_devi_unconfig(pdip,
6054 		    NDI_DEVI_REMOVE | NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
6055 			retval = EBUSY;
6056 		}
6057 
6058 		ddi_release_devi(pdip);
6059 		break;
6060 	}
6061 #endif
6062 	}
6063 
6064 end:
6065 	/* Free the memory allocated above */
6066 	if (phci_path != NULL) {
6067 		kmem_free(phci_path, MAXPATHLEN);
6068 	}
6069 	if (client_path != NULL) {
6070 		kmem_free(client_path, MAXPATHLEN);
6071 	}
6072 	if (paddr != NULL) {
6073 		kmem_free(paddr, MAXNAMELEN);
6074 	}
6075 	return (retval);
6076 }
6077 
6078 /*
6079  * devctl IOCTL support for client device DR
6080  */
6081 /* ARGSUSED */
6082 int
6083 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
6084     int *rvalp)
6085 {
6086 	dev_info_t *self;
6087 	dev_info_t *child;
6088 	scsi_hba_tran_t *hba;
6089 	struct devctl_iocdata *dcp;
6090 	struct scsi_vhci *vhci;
6091 	int rv = 0;
6092 	int retval = 0;
6093 	scsi_vhci_priv_t *svp;
6094 	mdi_pathinfo_t  *pip;
6095 
6096 	if ((vhci = ddi_get_soft_state(vhci_softstate,
6097 	    MINOR2INST(getminor(dev)))) == NULL)
6098 		return (ENXIO);
6099 
6100 	/*
6101 	 * check if :devctl minor device has been opened
6102 	 */
6103 	mutex_enter(&vhci->vhci_mutex);
6104 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
6105 		mutex_exit(&vhci->vhci_mutex);
6106 		return (ENXIO);
6107 	}
6108 	mutex_exit(&vhci->vhci_mutex);
6109 
6110 	self = vhci->vhci_dip;
6111 	hba = ddi_get_driver_private(self);
6112 	if (hba == NULL)
6113 		return (ENXIO);
6114 
6115 	/*
6116 	 * We can use the generic implementation for these ioctls
6117 	 */
6118 	switch (cmd) {
6119 	case DEVCTL_DEVICE_GETSTATE:
6120 	case DEVCTL_DEVICE_ONLINE:
6121 	case DEVCTL_DEVICE_OFFLINE:
6122 	case DEVCTL_DEVICE_REMOVE:
6123 	case DEVCTL_BUS_GETSTATE:
6124 		return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
6125 	}
6126 
6127 	/*
6128 	 * read devctl ioctl data
6129 	 */
6130 	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
6131 		return (EFAULT);
6132 
6133 	switch (cmd) {
6134 
6135 	case DEVCTL_DEVICE_RESET:
6136 		/*
6137 		 * lookup and hold child device
6138 		 */
6139 		if ((child = ndi_devi_find(self, ndi_dc_getname(dcp),
6140 		    ndi_dc_getaddr(dcp))) == NULL) {
6141 			rv = ENXIO;
6142 			break;
6143 		}
6144 		retval = mdi_select_path(child, NULL,
6145 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
6146 		    NULL, &pip);
6147 		if ((retval != MDI_SUCCESS) || (pip == NULL)) {
6148 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: "
6149 			    "Unable to get a path, dip 0x%p", (void *)child));
6150 			rv = ENXIO;
6151 			break;
6152 		}
6153 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
6154 		if (vhci_recovery_reset(svp->svp_svl,
6155 		    &svp->svp_psd->sd_address, TRUE,
6156 		    VHCI_DEPTH_TARGET) == 0) {
6157 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6158 			    "!vhci_ioctl(pip:%p): "
6159 			    "reset failed\n", (void *)pip));
6160 			rv = ENXIO;
6161 		}
6162 		mdi_rele_path(pip);
6163 		break;
6164 
6165 	case DEVCTL_BUS_QUIESCE:
6166 	case DEVCTL_BUS_UNQUIESCE:
6167 	case DEVCTL_BUS_RESET:
6168 	case DEVCTL_BUS_RESETALL:
6169 #ifdef	DEBUG
6170 	case DEVCTL_BUS_CONFIGURE:
6171 	case DEVCTL_BUS_UNCONFIGURE:
6172 #endif
6173 		rv = ENOTSUP;
6174 		break;
6175 
6176 	default:
6177 		rv = ENOTTY;
6178 	} /* end of outer switch */
6179 
6180 	ndi_dc_freehdl(dcp);
6181 	return (rv);
6182 }
6183 
6184 /*
6185  * Routine to get the PHCI pathname from ioctl structures in userland
6186  */
6187 /* ARGSUSED */
6188 static int
6189 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path,
6190     int mode, caddr_t s)
6191 {
6192 	int retval = 0;
6193 
6194 	if (ddi_copyin(pioc->phci, phci_path, MAXPATHLEN, mode)) {
6195 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> "
6196 		    "phci_path copyin failed", s));
6197 		retval = EFAULT;
6198 	}
6199 	return (retval);
6200 
6201 }
6202 
6203 
6204 /*
6205  * Routine to get the Client device pathname from ioctl structures in userland
6206  */
6207 /* ARGSUSED */
6208 static int
6209 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path,
6210     int mode, caddr_t s)
6211 {
6212 	int retval = 0;
6213 
6214 	if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) {
6215 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: "
6216 		    "ioctl <%s> client_path copyin failed", s));
6217 		retval = EFAULT;
6218 	}
6219 	return (retval);
6220 }
6221 
6222 
6223 /*
6224  * Routine to get physical device address from ioctl structure in userland
6225  */
6226 /* ARGSUSED */
6227 static int
6228 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s)
6229 {
6230 	int retval = 0;
6231 
6232 	if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) {
6233 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: "
6234 		    "ioctl <%s> device addr copyin failed", s));
6235 		retval = EFAULT;
6236 	}
6237 	return (retval);
6238 }
6239 
6240 
6241 /*
6242  * Routine to send client device pathname to userland.
6243  */
6244 /* ARGSUSED */
6245 static int
6246 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc,
6247     int mode, caddr_t s)
6248 {
6249 	int retval = 0;
6250 
6251 	if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) {
6252 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: "
6253 		    "ioctl <%s> client_path copyout failed", s));
6254 		retval = EFAULT;
6255 	}
6256 	return (retval);
6257 }
6258 
6259 
6260 /*
6261  * Routine to translate a dev_info pointer (dip) to a device pathname.
6262  */
6263 static void
6264 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path)
6265 {
6266 	(void) ddi_pathname(dip, path);
6267 }
6268 
6269 
6270 /*
6271  * vhci_get_phci_path_list:
6272  *		get information about devices associated with a
6273  *		given PHCI device.
6274  *
6275  * Return Values:
6276  *		path information elements
6277  */
6278 int
6279 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf,
6280     uint_t num_elems)
6281 {
6282 	uint_t			count, done;
6283 	mdi_pathinfo_t		*pip;
6284 	sv_path_info_t		*ret_pip;
6285 	int			status;
6286 	size_t			prop_size;
6287 
6288 	/*
6289 	 * Get the PHCI structure and retrieve the path information
6290 	 * from the GUID hash table.
6291 	 */
6292 
6293 	ret_pip = pibuf;
6294 	count = 0;
6295 
6296 	ndi_devi_enter(pdip);
6297 
6298 	done = (count >= num_elems);
6299 	pip = mdi_get_next_client_path(pdip, NULL);
6300 	while (pip && !done) {
6301 		mdi_pi_lock(pip);
6302 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6303 		    ret_pip->device.ret_phci);
6304 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6305 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6306 		    &ret_pip->ret_ext_state);
6307 
6308 		status = mdi_prop_size(pip, &prop_size);
6309 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6310 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6311 		}
6312 
6313 #ifdef DEBUG
6314 		if (status != MDI_SUCCESS) {
6315 			VHCI_DEBUG(2, (CE_WARN, NULL,
6316 			    "!vhci_get_phci_path_list: "
6317 			    "phci <%s>, prop size failure 0x%x",
6318 			    ret_pip->device.ret_phci, status));
6319 		}
6320 #endif /* DEBUG */
6321 
6322 
6323 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6324 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6325 			status = mdi_prop_pack(pip,
6326 			    &ret_pip->ret_prop.buf,
6327 			    ret_pip->ret_prop.buf_size);
6328 
6329 #ifdef DEBUG
6330 			if (status != MDI_SUCCESS) {
6331 				VHCI_DEBUG(2, (CE_WARN, NULL,
6332 				    "!vhci_get_phci_path_list: "
6333 				    "phci <%s>, prop pack failure 0x%x",
6334 				    ret_pip->device.ret_phci, status));
6335 			}
6336 #endif /* DEBUG */
6337 		}
6338 
6339 		mdi_pi_unlock(pip);
6340 		pip = mdi_get_next_client_path(pdip, pip);
6341 		ret_pip++;
6342 		count++;
6343 		done = (count >= num_elems);
6344 	}
6345 
6346 	ndi_devi_exit(pdip);
6347 
6348 	return (MDI_SUCCESS);
6349 }
6350 
6351 
6352 /*
6353  * vhci_get_client_path_list:
6354  *		get information about various paths associated with a
6355  *		given client device.
6356  *
6357  * Return Values:
6358  *		path information elements
6359  */
6360 int
6361 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf,
6362     uint_t num_elems)
6363 {
6364 	uint_t			count, done;
6365 	mdi_pathinfo_t		*pip;
6366 	sv_path_info_t		*ret_pip;
6367 	int			status;
6368 	size_t			prop_size;
6369 
6370 	ret_pip = pibuf;
6371 	count = 0;
6372 
6373 	ndi_devi_enter(cdip);
6374 
6375 	done = (count >= num_elems);
6376 	pip = mdi_get_next_phci_path(cdip, NULL);
6377 	while (pip && !done) {
6378 		mdi_pi_lock(pip);
6379 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6380 		    ret_pip->device.ret_phci);
6381 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6382 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6383 		    &ret_pip->ret_ext_state);
6384 
6385 		status = mdi_prop_size(pip, &prop_size);
6386 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6387 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6388 		}
6389 
6390 #ifdef DEBUG
6391 		if (status != MDI_SUCCESS) {
6392 			VHCI_DEBUG(2, (CE_WARN, NULL,
6393 			    "!vhci_get_client_path_list: "
6394 			    "phci <%s>, prop size failure 0x%x",
6395 			    ret_pip->device.ret_phci, status));
6396 		}
6397 #endif /* DEBUG */
6398 
6399 
6400 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6401 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6402 			status = mdi_prop_pack(pip,
6403 			    &ret_pip->ret_prop.buf,
6404 			    ret_pip->ret_prop.buf_size);
6405 
6406 #ifdef DEBUG
6407 			if (status != MDI_SUCCESS) {
6408 				VHCI_DEBUG(2, (CE_WARN, NULL,
6409 				    "!vhci_get_client_path_list: "
6410 				    "phci <%s>, prop pack failure 0x%x",
6411 				    ret_pip->device.ret_phci, status));
6412 			}
6413 #endif /* DEBUG */
6414 		}
6415 
6416 		mdi_pi_unlock(pip);
6417 		pip = mdi_get_next_phci_path(cdip, pip);
6418 		ret_pip++;
6419 		count++;
6420 		done = (count >= num_elems);
6421 	}
6422 
6423 	ndi_devi_exit(cdip);
6424 
6425 	return (MDI_SUCCESS);
6426 }
6427 
6428 
6429 /*
6430  * Routine to get ioctl argument structure from userland.
6431  */
6432 /* ARGSUSED */
6433 static int
6434 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s)
6435 {
6436 	int	retval = 0;
6437 
6438 #ifdef  _MULTI_DATAMODEL
6439 	switch (ddi_model_convert_from(mode & FMODELS)) {
6440 	case DDI_MODEL_ILP32:
6441 	{
6442 		sv_iocdata32_t	ioc32;
6443 
6444 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6445 			retval = EFAULT;
6446 			break;
6447 		}
6448 		pioc->client	= (caddr_t)(uintptr_t)ioc32.client;
6449 		pioc->phci	= (caddr_t)(uintptr_t)ioc32.phci;
6450 		pioc->addr	= (caddr_t)(uintptr_t)ioc32.addr;
6451 		pioc->buf_elem	= (uint_t)ioc32.buf_elem;
6452 		pioc->ret_buf	= (sv_path_info_t *)(uintptr_t)ioc32.ret_buf;
6453 		pioc->ret_elem	= (uint_t *)(uintptr_t)ioc32.ret_elem;
6454 		break;
6455 	}
6456 
6457 	case DDI_MODEL_NONE:
6458 		if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6459 			retval = EFAULT;
6460 			break;
6461 		}
6462 		break;
6463 	}
6464 #else   /* _MULTI_DATAMODEL */
6465 	if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6466 		retval = EFAULT;
6467 	}
6468 #endif  /* _MULTI_DATAMODEL */
6469 
6470 #ifdef DEBUG
6471 	if (retval) {
6472 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6473 		    "iocdata copyin failed", s));
6474 	}
6475 #endif
6476 
6477 	return (retval);
6478 }
6479 
6480 
6481 /*
6482  * Routine to get the ioctl argument for ioctl causing controller switchover.
6483  */
6484 /* ARGSUSED */
6485 static int
6486 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc,
6487     int mode, caddr_t s)
6488 {
6489 	int	retval = 0;
6490 
6491 #ifdef  _MULTI_DATAMODEL
6492 	switch (ddi_model_convert_from(mode & FMODELS)) {
6493 	case DDI_MODEL_ILP32:
6494 	{
6495 		sv_switch_to_cntlr_iocdata32_t	ioc32;
6496 
6497 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6498 			retval = EFAULT;
6499 			break;
6500 		}
6501 		piocsc->client	= (caddr_t)(uintptr_t)ioc32.client;
6502 		piocsc->class	= (caddr_t)(uintptr_t)ioc32.class;
6503 		break;
6504 	}
6505 
6506 	case DDI_MODEL_NONE:
6507 		if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6508 			retval = EFAULT;
6509 		}
6510 		break;
6511 	}
6512 #else   /* _MULTI_DATAMODEL */
6513 	if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6514 		retval = EFAULT;
6515 	}
6516 #endif  /* _MULTI_DATAMODEL */
6517 
6518 #ifdef DEBUG
6519 	if (retval) {
6520 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6521 		    "switch_to_cntlr_iocdata copyin failed", s));
6522 	}
6523 #endif
6524 
6525 	return (retval);
6526 }
6527 
6528 
6529 /*
6530  * Routine to allocate memory for the path information structures.
6531  * It allocates two chunks of memory: one to keep the userland
6532  * pointers/values for path information and path properties, and a
6533  * second for kernel-allocated buffers for the path properties.  These
6534  * path properties are finally copied out to userland.
6535  */
6536 /* ARGSUSED */
6537 static int
6538 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf,
6539     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6540 {
6541 	sv_path_info_t	*pi;
6542 	uint_t		bufsize;
6543 	int		retval = 0;
6544 	int		index;
6545 
6546 	/* Allocate memory */
6547 	*upibuf = (sv_path_info_t *)
6548 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6549 	ASSERT(*upibuf != NULL);
6550 	*kpibuf = (sv_path_info_t *)
6551 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6552 	ASSERT(*kpibuf != NULL);
6553 
6554 	/*
6555 	 * Get the path info structure from the user space.
6556 	 * We are interested in the following fields:
6557 	 *	- user size of buffer for per path properties.
6558 	 *	- user address of buffer for path info properties.
6559 	 *	- user pointer for returning actual buffer size
6560 	 * Keep these fields in the 'upibuf' structures.
6561 	 * Allocate buffer for per path info properties in kernel
6562 	 * structure ('kpibuf').
6563 	 * Size of these buffers will be equal to the size of buffers
6564 	 * in the user space.
6565 	 */
6566 #ifdef  _MULTI_DATAMODEL
6567 	switch (ddi_model_convert_from(mode & FMODELS)) {
6568 	case DDI_MODEL_ILP32:
6569 	{
6570 		sv_path_info32_t	*src;
6571 		sv_path_info32_t	pi32;
6572 
6573 		src  = (sv_path_info32_t *)pioc->ret_buf;
6574 		pi = (sv_path_info_t *)*upibuf;
6575 		for (index = 0; index < num_paths; index++, src++, pi++) {
6576 			if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) {
6577 				retval = EFAULT;
6578 				break;
6579 			}
6580 
6581 			pi->ret_prop.buf_size	=
6582 			    (uint_t)pi32.ret_prop.buf_size;
6583 			pi->ret_prop.ret_buf_size =
6584 			    (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size;
6585 			pi->ret_prop.buf	=
6586 			    (caddr_t)(uintptr_t)pi32.ret_prop.buf;
6587 		}
6588 		break;
6589 	}
6590 
6591 	case DDI_MODEL_NONE:
6592 		if (ddi_copyin(pioc->ret_buf, *upibuf,
6593 		    sizeof (sv_path_info_t) * num_paths, mode)) {
6594 			retval = EFAULT;
6595 		}
6596 		break;
6597 	}
6598 #else   /* _MULTI_DATAMODEL */
6599 	if (ddi_copyin(pioc->ret_buf, *upibuf,
6600 	    sizeof (sv_path_info_t) * num_paths, mode)) {
6601 		retval = EFAULT;
6602 	}
6603 #endif  /* _MULTI_DATAMODEL */
6604 
6605 	if (retval != 0) {
6606 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: "
6607 		    "ioctl <%s> normal: path_info copyin failed", s));
6608 		kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths);
6609 		kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths);
6610 		*upibuf = NULL;
6611 		*kpibuf = NULL;
6612 		return (retval);
6613 	}
6614 
6615 	/*
6616 	 * Allocate memory for per path properties.
6617 	 */
6618 	for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) {
6619 		bufsize = (*upibuf)[index].ret_prop.buf_size;
6620 
6621 		if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) {
6622 			pi->ret_prop.buf_size = bufsize;
6623 			pi->ret_prop.buf = (caddr_t)
6624 			    kmem_zalloc(bufsize, KM_SLEEP);
6625 			ASSERT(pi->ret_prop.buf != NULL);
6626 		} else {
6627 			pi->ret_prop.buf_size = 0;
6628 			pi->ret_prop.buf = NULL;
6629 		}
6630 
6631 		if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) {
6632 			pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc(
6633 			    sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP);
6634 			ASSERT(pi->ret_prop.ret_buf_size != NULL);
6635 		} else {
6636 			pi->ret_prop.ret_buf_size = NULL;
6637 		}
6638 	}
6639 
6640 	return (0);
6641 }
6642 
6643 
6644 /*
6645  * Routine to free memory for the path information structures.
6646  * This is the memory which was allocated earlier.
6647  */
6648 /* ARGSUSED */
6649 static void
6650 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6651     uint_t num_paths)
6652 {
6653 	sv_path_info_t	*pi;
6654 	int		index;
6655 
6656 	/* Free memory for per path properties */
6657 	for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) {
6658 		if (pi->ret_prop.ret_buf_size != NULL) {
6659 			kmem_free(pi->ret_prop.ret_buf_size,
6660 			    sizeof (*pi->ret_prop.ret_buf_size));
6661 		}
6662 
6663 		if (pi->ret_prop.buf != NULL) {
6664 			kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size);
6665 		}
6666 	}
6667 
6668 	/* Free memory for path info structures */
6669 	kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths);
6670 	kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths);
6671 }
6672 
6673 
6674 /*
6675  * Routine to copy path information and path properties to userland.
6676  */
6677 /* ARGSUSED */
6678 static int
6679 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6680     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6681 {
6682 	int			retval = 0, index;
6683 	sv_path_info_t		*upi_ptr;
6684 	sv_path_info32_t	*upi32_ptr;
6685 
6686 #ifdef  _MULTI_DATAMODEL
6687 	switch (ddi_model_convert_from(mode & FMODELS)) {
6688 	case DDI_MODEL_ILP32:
6689 		goto copy_32bit;
6690 
6691 	case DDI_MODEL_NONE:
6692 		goto copy_normal;
6693 	}
6694 #else   /* _MULTI_DATAMODEL */
6695 
6696 	goto copy_normal;
6697 
6698 #endif  /* _MULTI_DATAMODEL */
6699 
6700 copy_normal:
6701 
6702 	/*
6703 	 * Copy path information and path properties to user land.
6704 	 * Pointer fields inside the path property structure were
6705 	 * saved in the 'upibuf' structure earlier.
6706 	 */
6707 	upi_ptr = pioc->ret_buf;
6708 	for (index = 0; index < num_paths; index++) {
6709 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6710 		    upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6711 			retval = EFAULT;
6712 			break;
6713 		}
6714 
6715 		if (ddi_copyout(kpibuf[index].ret_addr,
6716 		    upi_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6717 			retval = EFAULT;
6718 			break;
6719 		}
6720 
6721 		if (ddi_copyout(&kpibuf[index].ret_state,
6722 		    &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state),
6723 		    mode)) {
6724 			retval = EFAULT;
6725 			break;
6726 		}
6727 
6728 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6729 		    &upi_ptr[index].ret_ext_state,
6730 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6731 			retval = EFAULT;
6732 			break;
6733 		}
6734 
6735 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6736 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6737 		    upibuf[index].ret_prop.ret_buf_size,
6738 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6739 			retval = EFAULT;
6740 			break;
6741 		}
6742 
6743 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6744 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6745 		    upibuf[index].ret_prop.buf,
6746 		    upibuf[index].ret_prop.buf_size, mode)) {
6747 			retval = EFAULT;
6748 			break;
6749 		}
6750 	}
6751 
6752 #ifdef DEBUG
6753 	if (retval) {
6754 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6755 		    "normal: path_info copyout failed", s));
6756 	}
6757 #endif
6758 
6759 	return (retval);
6760 
6761 copy_32bit:
6762 	/*
6763 	 * Copy path information and path properties to user land.
6764 	 * Pointer fields inside the path property structure were
6765 	 * saved in the 'upibuf' structure earlier.
6766 	 */
6767 	upi32_ptr = (sv_path_info32_t *)pioc->ret_buf;
6768 	for (index = 0; index < num_paths; index++) {
6769 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6770 		    upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6771 			retval = EFAULT;
6772 			break;
6773 		}
6774 
6775 		if (ddi_copyout(kpibuf[index].ret_addr,
6776 		    upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6777 			retval = EFAULT;
6778 			break;
6779 		}
6780 
6781 		if (ddi_copyout(&kpibuf[index].ret_state,
6782 		    &upi32_ptr[index].ret_state,
6783 		    sizeof (kpibuf[index].ret_state), mode)) {
6784 			retval = EFAULT;
6785 			break;
6786 		}
6787 
6788 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6789 		    &upi32_ptr[index].ret_ext_state,
6790 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6791 			retval = EFAULT;
6792 			break;
6793 		}
6794 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6795 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6796 		    upibuf[index].ret_prop.ret_buf_size,
6797 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6798 			retval = EFAULT;
6799 			break;
6800 		}
6801 
6802 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6803 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6804 		    upibuf[index].ret_prop.buf,
6805 		    upibuf[index].ret_prop.buf_size, mode)) {
6806 			retval = EFAULT;
6807 			break;
6808 		}
6809 	}
6810 
6811 #ifdef DEBUG
6812 	if (retval) {
6813 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6814 		    "32bit: path_info copyout failed", s));
6815 	}
6816 #endif
6817 
6818 	return (retval);
6819 }
6820 
6821 
6822 /*
6823  * vhci_failover()
6824  * This routine expects VHCI_HOLD_LUN before being invoked.  It can be invoked
6825  * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC.  For Asynchronous failovers
6826  * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC.  For asynchronous failovers
6827  * this routine shall VHCI_RELEASE_LUN on exiting.  For synchronous failovers
6828  * it is the caller's responsibility to release the lun.
6829 
6830 /* ARGSUSED */
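/*
 * A minimal caller sketch (hypothetical, but mirroring the synchronous
 * use in the SCSI_VHCI_SWITCH_TO_CNTLR ioctl above):
 *
 *	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
 *	retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
 *	VHCI_RELEASE_LUN(vlun);
 *
 * mdi_failover() dispatches to this routine through the vHCI ops
 * registered by scsi_vhci.
 */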
6831 static int
6832 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags)
6833 {
6834 	char			*guid;
6835 	scsi_vhci_lun_t		*vlun = NULL;
6836 	struct scsi_vhci	*vhci;
6837 	mdi_pathinfo_t		*pip, *npip;
6838 	char			*s_pclass, *pclass1, *pclass2, *pclass;
6839 	char			active_pclass_copy[255], *active_pclass_ptr;
6840 	char			*ptr1, *ptr2;
6841 	mdi_pathinfo_state_t	pi_state;
6842 	uint32_t		pi_ext_state;
6843 	scsi_vhci_priv_t	*svp;
6844 	struct scsi_device	*sd;
6845 	struct scsi_failover_ops	*sfo;
6846 	int			sps; /* mdi_select_path() status */
6847 	int			activation_done = 0;
6848 	int			rval, retval = MDI_FAILURE;
6849 	int			reserve_pending, check_condition, UA_condition;
6850 	struct scsi_pkt		*pkt;
6851 	struct buf		*bp;
6852 
6853 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
6854 	sd = ddi_get_driver_private(cdip);
6855 	vlun = ADDR2VLUN(&sd->sd_address);
6856 	ASSERT(vlun != 0);
6857 	ASSERT(VHCI_LUN_IS_HELD(vlun));
6858 	guid = vlun->svl_lun_wwn;
6859 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid));
6860 	vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s "
6861 	    "(GUID %s)", ddi_node_name(cdip), guid);
6862 
6863 	/*
6864 	 * Maintain a local copy of vlun->svl_active_pclass for the rest
6865 	 * of the processing.  Accessing the field directly in the loop
6866 	 * below breaks the loop logic when the field gets updated by
6867 	 * other threads (updating path status, etc.) and causes the
6868 	 * 'paths are not currently available' condition to be declared
6869 	 * prematurely.
6870 	 */
6871 	mutex_enter(&vlun->svl_mutex);
6872 	if (vlun->svl_active_pclass != NULL) {
6873 		(void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
6874 		    sizeof (active_pclass_copy));
6875 		active_pclass_ptr = &active_pclass_copy[0];
6876 		mutex_exit(&vlun->svl_mutex);
6877 		if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
6878 		    active_pclass_ptr) != 0) {
6879 			retval = MDI_FAILURE;
6880 		}
6881 	} else {
6882 		/*
6883 		 * This can happen only when the available path discovered
6884 		 * to the device is a STANDBY path.
6885 		 */
6886 		mutex_exit(&vlun->svl_mutex);
6887 		active_pclass_copy[0] = '\0';
6888 		active_pclass_ptr = NULL;
6889 	}
6890 
6891 	sfo = vlun->svl_fops;
6892 	ASSERT(sfo != NULL);
6893 	pclass1 = s_pclass = active_pclass_ptr;
6894 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
6895 	    (s_pclass == NULL ? "<none>" : s_pclass)));
6896 
6897 next_pathclass:
6898 
6899 	rval = sfo->sfo_pathclass_next(pclass1, &pclass2,
6900 	    vlun->svl_fops_ctpriv);
6901 	if (rval == ENOENT) {
6902 		if (s_pclass == NULL) {
6903 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
6904 			    "failed, no more pathclasses\n", guid));
6905 			goto done;
6906 		} else {
6907 			(void) sfo->sfo_pathclass_next(NULL, &pclass2,
6908 			    vlun->svl_fops_ctpriv);
6909 		}
6910 	} else if (rval == EINVAL) {
6911 		vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
6912 		    "device %s (GUID %s): Invalid path-class %s",
6913 		    ddi_node_name(cdip), guid,
6914 		    ((pclass1 == NULL) ? "<none>" : pclass1));
6915 		goto done;
6916 	}
6917 	if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
6918 		/*
6919 		 * paths are not currently available
6920 		 */
6921 		vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
6922 		    " for device %s (GUID %s)",
6923 		    ddi_node_name(cdip), guid);
6924 		goto done;
6925 	}
6926 	pip = npip = NULL;
6927 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
6928 	    "%s as failover destination\n", guid, pclass2));
6929 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
6930 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6931 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
6932 		    "STANDBY paths found (status:%x)!\n", guid, sps));
6933 		pclass1 = pclass2;
6934 		goto next_pathclass;
6935 	}
6936 	do {
6937 		pclass = NULL;
6938 		if ((mdi_prop_lookup_string(npip, "path-class",
6939 		    &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
6940 		    pclass) != 0)) {
6941 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6942 			    "!vhci_failover(5.5)(%s): skipping path "
6943 			    "%p(%s)...\n", guid, (void *)npip, pclass));
6944 			pip = npip;
6945 			sps = mdi_select_path(cdip, NULL,
6946 			    MDI_SELECT_STANDBY_PATH, pip, &npip);
6947 			mdi_rele_path(pip);
6948 			(void) mdi_prop_free(pclass);
6949 			continue;
6950 		}
6951 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
6952 
6953 		/*
6954 		 * Issue a READ at a non-zero block on this STANDBY path.
6955 		 * Purple returns
6956 		 * 1. RESERVATION_CONFLICT if a reservation is pending,
6957 		 * 2. a POR check condition if a reset happened,
6958 		 * 3. failover check conditions if one is already in progress.
6959 		 */
6960 		reserve_pending = 0;
6961 		check_condition = 0;
6962 		UA_condition = 0;
6963 
6964 		bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
6965 		    (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL);
6966 		if (!bp) {
6967 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6968 			    "vhci_failover !No resources (buf)\n"));
6969 			mdi_rele_path(npip);
6970 			goto done;
6971 		}
6972 		pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
6973 		    CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
6974 		    PKT_CONSISTENT, NULL, NULL);
6975 		if (pkt) {
6976 			(void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t)
6977 			    pkt->pkt_cdbp, SCMD_READ, 1, 1, 0);
6978 			pkt->pkt_flags = FLAG_NOINTR;
6979 check_path_again:
6980 			pkt->pkt_path_instance = mdi_pi_get_path_instance(npip);
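			/* Allow the probe READ up to 90 seconds (3 * 30). */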
6981 			pkt->pkt_time = 3 * 30;
6982 
6983 			if (scsi_transport(pkt) == TRAN_ACCEPT) {
6984 				switch (pkt->pkt_reason) {
6985 				case CMD_CMPLT:
6986 					switch (SCBP_C(pkt)) {
6987 					case STATUS_GOOD:
6988 						/* Already failed over */
6989 						activation_done = 1;
6990 						break;
6991 					case STATUS_RESERVATION_CONFLICT:
6992 						reserve_pending = 1;
6993 						break;
6994 					case STATUS_CHECK:
6995 						check_condition = 1;
6996 						break;
6997 					}
6998 				}
6999 			}
7000 			if (check_condition &&
7001 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
7002 				uint8_t *sns, skey, asc, ascq;
7003 				sns = (uint8_t *)
7004 				    &(((struct scsi_arq_status *)(uintptr_t)
7005 				    (pkt->pkt_scbp))->sts_sensedata);
7006 				skey = scsi_sense_key(sns);
7007 				asc = scsi_sense_asc(sns);
7008 				ascq = scsi_sense_ascq(sns);
7009 				if (skey == KEY_UNIT_ATTENTION &&
7010 				    asc == 0x29) {
7011 					/* Already failed over */
7012 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7013 					    "!vhci_failover(7)(%s): "
7014 					    "path 0x%p POR UA condition\n",
7015 					    guid, (void *)npip));
7016 					if (UA_condition == 0) {
7017 						UA_condition = 1;
7018 						goto check_path_again;
7019 					}
7020 				} else {
7021 					activation_done = 0;
7022 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7023 					    "!vhci_failover(%s): path 0x%p "
7024 					    "unhandled chkcond %x %x %x\n",
7025 					    guid, (void *)npip, skey,
7026 					    asc, ascq));
7027 				}
7028 			}
7029 			scsi_destroy_pkt(pkt);
7030 		}
7031 		scsi_free_consistent_buf(bp);
7032 
7033 		if (activation_done) {
7034 			mdi_rele_path(npip);
7035 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7036 			    "path 0x%p already failedover\n", guid,
7037 			    (void *)npip));
7038 			break;
7039 		}
7040 		if (reserve_pending && (vlun->svl_xlf_capable == 0)) {
7041 			(void) vhci_recovery_reset(vlun,
7042 			    &svp->svp_psd->sd_address,
7043 			    FALSE, VHCI_DEPTH_ALL);
7044 		}
7045 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): "
7046 		    "activating path 0x%p(psd:%p)\n", guid, (void *)npip,
7047 		    (void *)svp->svp_psd));
7048 		if (sfo->sfo_path_activate(svp->svp_psd, pclass2,
7049 		    vlun->svl_fops_ctpriv) == 0) {
7050 			activation_done = 1;
7051 			mdi_rele_path(npip);
7052 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7053 			    "path 0x%p successfully activated\n", guid,
7054 			    (void *)npip));
7055 			break;
7056 		}
7057 		pip = npip;
7058 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
7059 		    pip, &npip);
7060 		mdi_rele_path(pip);
7061 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7062 	if (activation_done == 0) {
7063 		pclass1 = pclass2;
7064 		goto next_pathclass;
7065 	}
7066 
7067 	/*
7068 	 * if we are here, we have succeeded in activating path npip of
7069 	 * pathclass pclass2; let us validate all paths of pclass2 by
7070 	 * "ping"-ing each one and mark the good ones ONLINE
7071 	 * Also, set the state of the paths belonging to the previously
7072 	 * active pathclass to STANDBY
7073 	 */
7074 	pip = npip = NULL;
7075 	sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7076 	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
7077 	    NULL, &npip);
7078 	if (npip == NULL || sps != MDI_SUCCESS) {
7079 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for "
7080 		    "device %s (GUID %s): paths may be busy\n",
7081 		    ddi_node_name(cdip), guid));
7082 		goto done;
7083 	}
7084 	do {
7085 		(void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state);
7086 		if (mdi_prop_lookup_string(npip, "path-class", &pclass)
7087 		    != MDI_SUCCESS) {
7088 			pip = npip;
7089 			sps = mdi_select_path(cdip, NULL,
7090 			    (MDI_SELECT_ONLINE_PATH |
7091 			    MDI_SELECT_STANDBY_PATH |
7092 			    MDI_SELECT_USER_DISABLE_PATH),
7093 			    pip, &npip);
7094 			mdi_rele_path(pip);
7095 			continue;
7096 		}
7097 		if (strcmp(pclass, pclass2) == 0) {
7098 			if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
7099 				svp = (scsi_vhci_priv_t *)
7100 				    mdi_pi_get_vhci_private(npip);
7101 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7102 				    "!vhci_failover(8)(%s): "
7103 				    "pinging path 0x%p\n",
7104 				    guid, (void *)npip));
7105 				if (sfo->sfo_path_ping(svp->svp_psd,
7106 				    vlun->svl_fops_ctpriv) == 1) {
7107 					mdi_pi_set_state(npip,
7108 					    MDI_PATHINFO_STATE_ONLINE);
7109 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7110 					    "!vhci_failover(9)(%s): "
7111 					    "path 0x%p ping successful, "
7112 					    "marked online\n", guid,
7113 					    (void *)npip));
7114 					MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO);
7115 				}
7116 			}
7117 		} else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass)
7118 		    == 0)) {
7119 			if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
7120 				mdi_pi_set_state(npip,
7121 				    MDI_PATHINFO_STATE_STANDBY);
7122 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7123 				    "!vhci_failover(10)(%s): path 0x%p marked "
7124 				    "STANDBY\n", guid, (void *)npip));
7125 				MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM);
7126 			}
7127 		}
7128 		(void) mdi_prop_free(pclass);
7129 		pip = npip;
7130 		sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7131 		    MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
7132 		    pip, &npip);
7133 		mdi_rele_path(pip);
7134 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7135 
7136 	/*
7137 	 * Update the AccessState of related MP-API TPGs
7138 	 */
7139 	(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
7140 
7141 	vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully "
7142 	    "for device %s (GUID %s): failed over from %s to %s",
7143 	    ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" :
7144 	    s_pclass), pclass2);
7145 	ptr1 = kmem_alloc(strlen(pclass2) + 1, KM_SLEEP);
7146 	(void) strlcpy(ptr1, pclass2, (strlen(pclass2) + 1));
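	/*
	 * Publish the new active path class under svl_mutex and free the
	 * old copy after dropping the lock.
	 */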
7147 	mutex_enter(&vlun->svl_mutex);
7148 	ptr2 = vlun->svl_active_pclass;
7149 	vlun->svl_active_pclass = ptr1;
7150 	mutex_exit(&vlun->svl_mutex);
7151 	if (ptr2) {
7152 		kmem_free(ptr2, strlen(ptr2) + 1);
7153 	}
7154 	mutex_enter(&vhci->vhci_mutex);
7155 	scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
7156 	    &vhci->vhci_reset_notify_listf);
7157 	/* All reservations are cleared upon these resets. */
7158 	vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
7159 	mutex_exit(&vhci->vhci_mutex);
7160 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active "
7161 	    "pathclass for %s is now %s\n", guid, pclass2));
7162 	retval = MDI_SUCCESS;
7163 
7164 done:
7165 	vlun->svl_failover_status = retval;
7166 	if (flags == MDI_FAILOVER_ASYNC) {
7167 		VHCI_RELEASE_LUN(vlun);
7168 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7169 		    "releasing lun, as failover was ASYNC\n"));
7170 	} else {
7171 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7172 		    "NOT releasing lun, as failover was SYNC\n"));
7173 	}
7174 	return (retval);
7175 }
7176 
7177 /*
7178  * vhci_client_attached is called after the successful attach of a
7179  * client devinfo node.
7180  */
7181 static void
7182 vhci_client_attached(dev_info_t *cdip)
7183 {
7184 	mdi_pathinfo_t	*pip;
7185 
7186 	/*
7187 	 * At this point the client has attached and its instance number is
7188 	 * valid, so we can set up kstats.  We need to do this here because it
7189 	 * is possible for paths to go online prior to client attach, in which
7190 	 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
7191 	 * was a noop.
7192 	 */
7193 	ndi_devi_enter(cdip);
7194 	for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
7195 	    pip = mdi_get_next_phci_path(cdip, pip))
7196 		vhci_kstat_create_pathinfo(pip);
7197 	ndi_devi_exit(cdip);
7198 }
7199 
7200 /*
7201  * quiesce all of the online paths
7202  */
7203 static int
7204 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
7205     char *guid, char *active_pclass_ptr)
7206 {
7207 	scsi_vhci_priv_t	*svp;
7208 	char			*s_pclass = NULL;
7209 	mdi_pathinfo_t		*npip, *pip;
7210 	int			sps;
7211 
7212 	/* quiesce currently active paths */
7213 	s_pclass = NULL;
7214 	pip = npip = NULL;
7215 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
7216 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
7217 		return (1);
7218 	}
7219 	do {
7220 		if (mdi_prop_lookup_string(npip, "path-class",
7221 		    &s_pclass) != MDI_SUCCESS) {
7222 			mdi_rele_path(npip);
7223 			vhci_log(CE_NOTE, vdip, "!Failover operation failed "
7224 			    "for device %s (GUID %s) due to an internal "
7225 			    "error", ddi_node_name(cdip), guid);
7226 			return (1);
7227 		}
7228 		if (strcmp(s_pclass, active_pclass_ptr) == 0) {
7229 			/*
7230 			 * Quiesce the path.  Free s_pclass since
7231 			 * we don't need it anymore.
7232 			 */
7233 			VHCI_DEBUG(1, (CE_NOTE, NULL,
7234 			    "!vhci_failover(2)(%s): failing over "
7235 			    "from %s; quiescing path %p\n",
7236 			    guid, s_pclass, (void *)npip));
7237 			(void) mdi_prop_free(s_pclass);
7238 			svp = (scsi_vhci_priv_t *)
7239 			    mdi_pi_get_vhci_private(npip);
7240 			if (svp == NULL) {
7241 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7242 				    "!vhci_failover(2.5)(%s): no "
7243 				    "client priv! %p offlined?\n",
7244 				    guid, (void *)npip));
7245 				pip = npip;
7246 				sps = mdi_select_path(cdip, NULL,
7247 				    MDI_SELECT_ONLINE_PATH, pip, &npip);
7248 				mdi_rele_path(pip);
7249 				continue;
7250 			}
7251 			if (scsi_abort(&svp->svp_psd->sd_address, NULL)
7252 			    == 0) {
7253 				(void) vhci_recovery_reset(vlun,
7254 				    &svp->svp_psd->sd_address, FALSE,
7255 				    VHCI_DEPTH_TARGET);
7256 			}
7257 			mutex_enter(&svp->svp_mutex);
7258 			if (svp->svp_cmds == 0) {
7259 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7260 				    "!vhci_failover(3)(%s):"
7261 				    "quiesced path %p\n", guid, (void *)npip));
7262 			} else {
7263 				while (svp->svp_cmds != 0) {
7264 					cv_wait(&svp->svp_cv, &svp->svp_mutex);
7265 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7266 					    "!vhci_failover(3.cv)(%s):"
7267 					    "quiesced path %p\n", guid,
7268 					    (void *)npip));
7269 				}
7270 			}
7271 			mutex_exit(&svp->svp_mutex);
7272 		} else {
7273 			/*
7274 			 * make sure we free up the memory
7275 			 */
7276 			(void) mdi_prop_free(s_pclass);
7277 		}
7278 		pip = npip;
7279 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH,
7280 		    pip, &npip);
7281 		mdi_rele_path(pip);
7282 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7283 	return (0);
7284 }
7285 
7286 static struct scsi_vhci_lun *
7287 vhci_lun_lookup(dev_info_t *tgt_dip)
7288 {
7289 	return ((struct scsi_vhci_lun *)
7290 	    mdi_client_get_vhci_private(tgt_dip));
7291 }
7292 
7293 static struct scsi_vhci_lun *
7294 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc)
7295 {
7296 	struct scsi_vhci_lun *svl;
7297 
7298 	if ((svl = vhci_lun_lookup(tgt_dip)) != NULL) {
7299 		return (svl);
7300 	}
7301 
7302 	svl = kmem_zalloc(sizeof (*svl), KM_SLEEP);
7303 	svl->svl_lun_wwn = kmem_zalloc(strlen(guid) + 1, KM_SLEEP);
7304 	(void) strcpy(svl->svl_lun_wwn, guid);
7305 	mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL);
7306 	cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL);
7307 	sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL);
7308 	svl->svl_waiting_for_activepath = 1;
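	/* Nominal sector size; the real value is established later. */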
7309 	svl->svl_sector_size = 1;
7310 	mdi_client_set_vhci_private(tgt_dip, svl);
7311 	*didalloc = 1;
7312 	VHCI_DEBUG(1, (CE_NOTE, NULL,
7313 	    "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
7314 	    guid, (void *)svl));
7315 	return (svl);
7316 }
7317 
7318 static void
7319 vhci_lun_free(struct scsi_vhci_lun *dvlp, struct scsi_device *sd)
7320 {
7321 	char *guid;
7322 
7323 	guid = dvlp->svl_lun_wwn;
7324 	ASSERT(guid != NULL);
7325 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid));
7326 
7327 	mutex_enter(&dvlp->svl_mutex);
7328 	if (dvlp->svl_active_pclass != NULL) {
7329 		kmem_free(dvlp->svl_active_pclass,
7330 		    strlen(dvlp->svl_active_pclass) + 1);
7331 	}
7332 	dvlp->svl_active_pclass = NULL;
7333 	mutex_exit(&dvlp->svl_mutex);
7334 
7335 	if (dvlp->svl_lun_wwn != NULL) {
7336 		kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn) + 1);
7337 	}
7338 	dvlp->svl_lun_wwn = NULL;
7339 
7340 	if (dvlp->svl_fops_name) {
7341 		kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name) + 1);
7342 	}
7343 	dvlp->svl_fops_name = NULL;
7344 
7345 	if (dvlp->svl_fops_ctpriv != NULL &&
7346 	    dvlp->svl_fops != NULL) {
7347 		dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv);
7348 	}
7349 
7350 	if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG)
7351 		taskq_destroy(dvlp->svl_taskq);
7352 
7353 	mutex_destroy(&dvlp->svl_mutex);
7354 	cv_destroy(&dvlp->svl_cv);
7355 	sema_destroy(&dvlp->svl_pgr_sema);
7356 	kmem_free(dvlp, sizeof (*dvlp));
7357 	/*
7358 	 * vhci_lun_free may be called before the tgt_dip
7359 	 * initialization so check if the sd is NULL.
7360 	 */
7361 	if (sd != NULL)
7362 		scsi_device_hba_private_set(sd, NULL);
7363 }
7364 
7365 int
7366 vhci_do_scsi_cmd(struct scsi_pkt *pkt)
7367 {
7368 	int	err = 0;
7369 	int	retry_cnt = 0;
7370 	uint8_t	*sns, skey;
7371 
7372 #ifdef DEBUG
7373 	if (vhci_debug > 5) {
7374 		vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip,
7375 		    CE_WARN, "Vhci command", pkt->pkt_cdbp);
7376 	}
7377 #endif
7378 
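	/*
	 * Poll the command, retrying on failure: up to six retries when
	 * scsi_poll() fails, a single retry for incomplete/reset/aborted/
	 * transport errors, and unconditional retries to clear unit
	 * attention and not ready conditions.
	 */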
7379 retry:
7380 	err = scsi_poll(pkt);
7381 	if (err) {
7382 		if (pkt->pkt_cdbp[0] == SCMD_RELEASE) {
7383 			if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) {
7384 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7385 				    "!v_s_do_s_c: RELEASE conflict\n"));
7386 				return (0);
7387 			}
7388 		}
7389 		if (retry_cnt++ < 6) {
7390 			VHCI_DEBUG(1, (CE_WARN, NULL,
7391 			    "!v_s_do_s_c:retry packet 0x%p "
7392 			    "status 0x%x reason %s",
7393 			    (void *)pkt, SCBP_C(pkt),
7394 			    scsi_rname(pkt->pkt_reason)));
7395 			if ((pkt->pkt_reason == CMD_CMPLT) &&
7396 			    (SCBP_C(pkt) == STATUS_CHECK) &&
7397 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
7398 				sns = (uint8_t *)
7399 				    &(((struct scsi_arq_status *)(uintptr_t)
7400 				    (pkt->pkt_scbp))->sts_sensedata);
7401 				skey = scsi_sense_key(sns);
7402 				VHCI_DEBUG(1, (CE_WARN, NULL,
7403 				    "!v_s_do_s_c:retry "
7404 				    "packet 0x%p  sense data %s", (void *)pkt,
7405 				    scsi_sname(skey)));
7406 			}
7407 			goto retry;
7408 		}
7409 		VHCI_DEBUG(1, (CE_WARN, NULL,
7410 		    "!v_s_do_s_c: failed transport 0x%p 0x%x",
7411 		    (void *)pkt, SCBP_C(pkt)));
7412 		return (0);
7413 	}
7414 
7415 	switch (pkt->pkt_reason) {
7416 		case CMD_TIMEOUT:
7417 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed "
7418 			    "out (pkt 0x%p)", (void *)pkt));
7419 			return (0);
7420 		case CMD_CMPLT:
7421 			switch (SCBP_C(pkt)) {
7422 				case STATUS_GOOD:
7423 					break;
7424 				case STATUS_CHECK:
7425 					if (pkt->pkt_state & STATE_ARQ_DONE) {
7426 						sns = (uint8_t *)&(((
7427 						    struct scsi_arq_status *)
7428 						    (uintptr_t)
7429 						    (pkt->pkt_scbp))->
7430 						    sts_sensedata);
7431 						skey = scsi_sense_key(sns);
7432 						if ((skey ==
7433 						    KEY_UNIT_ATTENTION) ||
7434 						    (skey ==
7435 						    KEY_NOT_READY)) {
7436 							/*
7437 							 * clear unit attn.
7438 							 */
7439 
7440 							VHCI_DEBUG(1,
7441 							    (CE_WARN, NULL,
7442 							    "!v_s_do_s_c: "
7443 							    "retry "
7444 							    "packet 0x%p sense "
7445 							    "data %s",
7446 							    (void *)pkt,
7447 							    scsi_sname
7448 							    (skey)));
7449 							goto retry;
7450 						}
7451 						VHCI_DEBUG(4, (CE_WARN, NULL,
7452 						    "!ARQ while "
7453 						    "transporting "
7454 						    "(pkt 0x%p)",
7455 						    (void *)pkt));
7456 						return (0);
7457 					}
7458 					return (0);
7459 				default:
7460 					VHCI_DEBUG(1, (CE_WARN, NULL,
7461 					    "!Bad status returned "
7462 					    "(pkt 0x%p, status %x)",
7463 					    (void *)pkt, SCBP_C(pkt)));
7464 					return (0);
7465 			}
7466 			break;
7467 		case CMD_INCOMPLETE:
7468 		case CMD_RESET:
7469 		case CMD_ABORTED:
7470 		case CMD_TRAN_ERR:
7471 			if (retry_cnt++ < 1) {
7472 				VHCI_DEBUG(1, (CE_WARN, NULL,
7473 				    "!v_s_do_s_c: retry packet 0x%p %s",
7474 				    (void *)pkt, scsi_rname(pkt->pkt_reason)));
7475 				goto retry;
7476 			}
7477 			/* FALLTHROUGH */
7478 		default:
7479 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not "
7480 			    "complete successfully (pkt 0x%p,"
7481 			    "reason %x)", (void *)pkt, pkt->pkt_reason));
7482 			return (0);
7483 	}
7484 	return (1);
7485 }
7486 
7487 static int
7488 vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
7489 {
7490 	mdi_pathinfo_t		*pip, *spip;
7491 	dev_info_t		*cdip;
7492 	struct scsi_vhci_priv	*svp;
7493 	mdi_pathinfo_state_t	pstate;
7494 	uint32_t		p_ext_state;
7495 
7496 	cdip = vlun->svl_dip;
7497 	pip = spip = NULL;
7498 	ndi_devi_enter(cdip);
7499 	pip = mdi_get_next_phci_path(cdip, NULL);
7500 	while (pip != NULL) {
7501 		(void) mdi_pi_get_state2(pip, &pstate, &p_ext_state);
7502 		if (pstate != MDI_PATHINFO_STATE_ONLINE) {
7503 			spip = pip;
7504 			pip = mdi_get_next_phci_path(cdip, spip);
7505 			continue;
7506 		}
7507 		mdi_hold_path(pip);
7508 		ndi_devi_exit(cdip);
7509 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7510 		mutex_enter(&svp->svp_mutex);
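		/*
		 * Wait for outstanding commands on this path to drain,
		 * giving up if none complete within
		 * vhci_path_quiesce_timeout seconds.
		 */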
7511 		while (svp->svp_cmds != 0) {
7512 			if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
7513 			    drv_usectohz(vhci_path_quiesce_timeout * 1000000),
7514 			    TR_CLOCK_TICK) == -1) {
7515 				mutex_exit(&svp->svp_mutex);
7516 				mdi_rele_path(pip);
7517 				VHCI_DEBUG(1, (CE_WARN, NULL,
7518 				    "Quiesce of lun was not successful "
7519 				    "vlun: 0x%p.", (void *)vlun));
7520 				return (0);
7521 			}
7522 		}
7523 		mutex_exit(&svp->svp_mutex);
7524 		ndi_devi_enter(cdip);
7525 		spip = pip;
7526 		pip = mdi_get_next_phci_path(cdip, spip);
7527 		mdi_rele_path(spip);
7528 	}
7529 	ndi_devi_exit(cdip);
7530 	return (1);
7531 }
7532 
7533 static int
7534 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp)
7535 {
7536 	scsi_vhci_lun_t		*vlun;
7537 	vhci_prout_t		*prout;
7538 	int			rval, success;
7539 	mdi_pathinfo_t		*pip, *npip;
7540 	scsi_vhci_priv_t	*osvp;
7541 	dev_info_t		*cdip;
7542 	uchar_t			cdb_1;
7543 	uchar_t			temp_res_key[MHIOC_RESV_KEY_SIZE];
7544 
7545 
7546 	/*
7547 	 * see if there are any other paths available; if none,
7548 	 * then there is nothing to do.
7549 	 */
7550 	cdip = svp->svp_svl->svl_dip;
7551 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7552 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7553 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7554 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7555 		    "%s%d: vhci_pgr_validate_and_register: first path\n",
7556 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7557 		return (1);
7558 	}
7559 
7560 	vlun = svp->svp_svl;
7561 	prout = &vlun->svl_prout;
7562 	ASSERT(vlun->svl_pgr_active != 0);
7563 
7564 	/*
7565 	 * While the path was busy/offlined, some other host might have
7566 	 * cleared this key.  Validate the key on some other path first.
7567 	 * If that fails, return failure.
7568 	 */
7569 
7570 	npip = pip;
7571 	pip = NULL;
7572 	success = 0;
7573 
7574 	/* Save the res key */
7575 	bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE);
7576 
7577 	/*
7578 	 * Sometimes the CDB from the application can be a Register_And_Ignore.
7579 	 * Instead of validation, this CDB would result in a forced
7580 	 * registration.  Convert it to a normal CDB for validation,
7581 	 * and be sure to restore the CDB afterwards.
7582 	 */
7583 	cdb_1 = vlun->svl_cdb[1];
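	/*
	 * The low five bits of CDB byte 1 hold the PROUT service action;
	 * clearing them leaves a plain REGISTER for validation.
	 */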
7584 	vlun->svl_cdb[1] &= 0xe0;
7585 
7586 	do {
7587 		osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
7588 		if (osvp == NULL) {
7589 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7590 			    "vhci_pgr_validate_and_register: no "
7591 			    "client priv! 0x%p offlined?\n",
7592 			    (void *)npip));
7593 			goto next_path_1;
7594 		}
7595 
7596 		if (osvp == svp) {
7597 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7598 			    "vhci_pgr_validate_and_register: same svp 0x%p"
7599 			    " npip 0x%p vlun 0x%p\n",
7600 			    (void *)svp, (void *)npip, (void *)vlun));
7601 			goto next_path_1;
7602 		}
7603 
7604 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7605 		    "vhci_pgr_validate_and_register: First validate on"
7606 		    " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
7607 		    " cdb1 %x\n", (void *)osvp, (void *)vlun,
7608 		    (void *)curthread, vlun->svl_cdb[1]));
7609 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:");
7610 
7611 		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7612 
7613 		VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy",
7614 		    (void *)vlun));
7615 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7616 
7617 		rval = vhci_do_prout(osvp);
7618 		if (rval == 1) {
7619 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7620 			    "%s%d: vhci_pgr_validate_and_register: key"
7621 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7622 			    ddi_get_instance(cdip), (void *)curthread));
7623 			pip = npip;
7624 			success = 1;
7625 			break;
7626 		} else {
7627 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7628 			    "vhci_pgr_validate_and_register: First validation"
7629 			    " on osvp 0x%p failed %x\n", (void *)osvp, rval));
7630 			vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:");
7631 		}
7632 
7633 		/*
7634 		 * Try other paths
7635 		 */
7636 next_path_1:
7637 		pip = npip;
7638 		rval = mdi_select_path(cdip, NULL,
7639 		    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
7640 		    pip, &npip);
7641 		mdi_rele_path(pip);
7642 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7643 
7644 
7645 	/* Be sure to restore original cdb */
7646 	vlun->svl_cdb[1] = cdb_1;
7647 
7648 	/* Restore the res_key */
7649 	bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7650 
7651 	/*
7652 	 * If the key could not be registered on any path the first time,
7653 	 * return success, as the online should still continue.
7654 	 */
7655 	if (success == 0) {
7656 		return (1);
7657 	}
7658 
7659 	ASSERT(pip != NULL);
7660 
7661 	/*
7662 	 * Force register on new path
7663 	 */
7664 	cdb_1 = vlun->svl_cdb[1];		/* store the cdb */
7665 
7666 	vlun->svl_cdb[1] &= 0xe0;
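	/*
	 * REGISTER AND IGNORE EXISTING KEY: force-register on the new
	 * path regardless of any key already registered there.
	 */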
7667 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
7668 
7669 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: ");
7670 
7671 	bcopy(prout->active_service_key, prout->service_key,
7672 	    MHIOC_RESV_KEY_SIZE);
7673 	bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7674 
7675 	vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: ");
7676 
7677 	rval = vhci_do_prout(svp);
7678 	vlun->svl_cdb[1] = cdb_1;		/* restore the cdb */
7679 	if (rval != 1) {
7680 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7681 		    "vhci_pgr_validate_and_register: register on new"
7682 		    " path 0x%p svp 0x%p failed %x\n",
7683 		    (void *)pip, (void *)svp, rval));
7684 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: ");
7685 		mdi_rele_path(pip);
7686 		return (0);
7687 	}
7688 
7689 	if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) {
7690 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7691 		    "vhci_pgr_validate_and_register: zero service key\n"));
7692 		mdi_rele_path(pip);
7693 		return (rval);
7694 	}
7695 
7696 	/*
7697 	 * While the key was force registered, some other host might have
7698 	 * cleared the key. Re-validate key on another pre-existing path
7699 	 * before declaring success.
7700 	 */
7701 	npip = pip;
7702 	pip = NULL;
7703 
7704 	/*
7705 	 * Sometimes the CDB from the application can be Register and Ignore.
7706 	 * Instead of validation, it would result in a forced registration.
7707 	 * Convert it to a normal CDB for validation, and be sure to
7708 	 * restore the CDB afterwards.
7709 	 */
7710 	cdb_1 = vlun->svl_cdb[1];
7711 	vlun->svl_cdb[1] &= 0xe0;
7712 	success = 0;
7713 
7714 	do {
7715 		osvp = (scsi_vhci_priv_t *)
7716 		    mdi_pi_get_vhci_private(npip);
7717 		if (osvp == NULL) {
7718 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7719 			    "vhci_pgr_validate_and_register: no "
7720 			    "client priv! 0x%p offlined?\n",
7721 			    (void *)npip));
7722 			goto next_path_2;
7723 		}
7724 
7725 		if (osvp == svp) {
7726 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7727 			    "vhci_pgr_validate_and_register: same osvp 0x%p"
7728 			    " npip 0x%p vlun 0x%p\n",
7729 			    (void *)svp, (void *)npip, (void *)vlun));
7730 			goto next_path_2;
7731 		}
7732 
7733 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7734 		    "vhci_pgr_validate_and_register: Re-validation on"
7735 		    " osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
7736 		    (void *)osvp, (void *)vlun, vlun->svl_cdb[1]));
7737 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7738 
7739 		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7740 
7741 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7742 
7743 		rval = vhci_do_prout(osvp);
7744 		if (rval == 1) {
7745 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7746 			    "%s%d: vhci_pgr_validate_and_register: key"
7747 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7748 			    ddi_get_instance(cdip), (void *)curthread));
7749 			pip = npip;
7750 			success = 1;
7751 			break;
7752 		} else {
7753 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7754 			    "vhci_pgr_validate_and_register: Re-validation on"
7755 			    " osvp 0x%p failed %x\n", (void *)osvp, rval));
7756 			vhci_print_prout_keys(vlun,
7757 			    "v_pgr_val_reg: reval failed: ");
7758 		}
7759 
7760 		/*
7761 		 * Try other paths
7762 		 */
7763 next_path_2:
7764 		pip = npip;
7765 		rval = mdi_select_path(cdip, NULL,
7766 		    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
7767 		    pip, &npip);
7768 		mdi_rele_path(pip);
7769 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7770 
7771 	/* Be sure to restore original cdb */
7772 	vlun->svl_cdb[1] = cdb_1;
7773 
7774 	if (success == 1) {
7775 		/* Successfully validated registration */
7776 		mdi_rele_path(pip);
7777 		return (1);
7778 	}
7779 
7780 	VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed"));
7781 
7782 	/*
7783 	 * The key is invalid; back out by registering a key value of 0.
7784 	 */
7785 	VHCI_DEBUG(4, (CE_NOTE, NULL,
7786 	    "vhci_pgr_validate_and_register: backout on"
7787 	    " svp 0x%p being done\n", (void *)svp));
7788 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7789 
7790 	bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7791 	bzero(prout->service_key, MHIOC_RESV_KEY_SIZE);
7792 
7793 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bzero: ");
7794 
7795 	/*
7796 	 * Get a new path
7797 	 */
7798 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7799 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7800 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7801 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7802 		    "%s%d: vhci_pgr_validate_and_register: no valid pip\n",
7803 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7804 		return (0);
7805 	}
7806 
7807 	if ((rval = vhci_do_prout(svp)) != 1) {
7808 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7809 		    "vhci_pgr_validate_and_register: backout on"
7810 		    " svp 0x%p failed\n", (void *)svp));
7811 		vhci_print_prout_keys(vlun, "backout failed");
7812 
7813 		VHCI_DEBUG(4, (CE_WARN, NULL,
7814 		    "%s%d: vhci_pgr_validate_and_register: key"
7815 		    " validation and backout failed", ddi_driver_name(cdip),
7816 		    ddi_get_instance(cdip)));
7817 		if (rval == VHCI_PGR_ILLEGALOP) {
7818 			VHCI_DEBUG(4, (CE_WARN, NULL,
7819 			    "%s%d: vhci_pgr_validate_and_register: key"
7820 			    " already cleared", ddi_driver_name(cdip),
7821 			    ddi_get_instance(cdip)));
7822 			rval = 1;
7823 		} else
7824 			rval = 0;
7825 	} else {
7826 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7827 		    "%s%d: vhci_pgr_validate_and_register: key"
7828 		    " validation failed, key backed out\n",
7829 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7830 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
7831 	}
7832 	mdi_rele_path(pip);
7833 
7834 	return (rval);
7835 }
7836 
7837 /*
7838  * taskq routine to dispatch a scsi cmd to vhci_scsi_start.  This ensures
7839  * that vhci_scsi_start is not called in interrupt context.
7840  * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we
7841  * need to complete the command if something goes wrong.
7842  */
7843 static void
7844 vhci_dispatch_scsi_start(void *arg)
7845 {
7846 	struct vhci_pkt *vpkt	= (struct vhci_pkt *)arg;
7847 	struct scsi_pkt *tpkt	= vpkt->vpkt_tgt_pkt;
7848 	int rval		= TRAN_BUSY;
7849 
7850 	VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
7851 	    " scsi-2 reserve for 0x%p\n",
7852 	    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7853 
7854 	/*
7855 	 * To prevent the taskq from being called recursively we set
7856 	 * the VHCI_PKT_THRU_TASKQ bit in the vhci_pkt_states.
7857 	 */
7858 	vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;
7859 
7860 	/*
7861 	 * Wait for the transport to become ready to send packets; if it
7862 	 * times out, it will return something other than TRAN_BUSY.
7863 	 * vhci_reserve_delay may need to be tuned for other transports
7864 	 * and is therefore a global.  Using delay() is safe here since
7865 	 * this routine is called by taskq dispatch and not during
7866 	 * interrupt context.
7867 	 */
7868 	while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
7869 	    vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
7870 		delay(drv_usectohz(vhci_reserve_delay));
7871 	}
7872 
7873 	switch (rval) {
7874 	case TRAN_ACCEPT:
7875 		return;
7876 
7877 	default:
7878 		/*
7879 		 * This pkt shall be retried, and to ensure another taskq
7880 		 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
7881 		 * flag.
7882 		 */
7883 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
7884 
7885 		/* Ensure that the pkt is retried without a reset */
7886 		tpkt->pkt_reason = CMD_ABORTED;
7887 		tpkt->pkt_statistics |= STAT_ABORTED;
7888 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
7889 		    "TRAN_rval %d returned for dip 0x%p", rval,
7890 		    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7891 		break;
7892 	}
7893 
7894 	/*
7895 	 * vpkt_org_vpkt should always be NULL here if the retry command
7896 	 * has been successfully dispatched.  If vpkt_org_vpkt != NULL at
7897 	 * this point, it is an error so restore the original vpkt and
7898 	 * return an error to the target driver so it can retry the
7899 	 * command as appropriate.
7900 	 */
7901 	if (vpkt->vpkt_org_vpkt != NULL) {
7902 		struct vhci_pkt		*new_vpkt = vpkt;
7903 		scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
7904 		    mdi_pi_get_vhci_private(vpkt->vpkt_path);
7905 
7906 		vpkt = vpkt->vpkt_org_vpkt;
7907 
7908 		vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
7909 		vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;
7910 
7911 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
7912 		    new_vpkt->vpkt_tgt_pkt);
7913 
7914 		tpkt = vpkt->vpkt_tgt_pkt;
7915 	}
7916 
7917 	scsi_hba_pkt_comp(tpkt);
7918 }
7919 
7920 static void
7921 vhci_initiate_auto_failback(void *arg)
7922 {
7923 	struct scsi_vhci_lun	*vlun = (struct scsi_vhci_lun *)arg;
7924 	dev_info_t		*vdip, *cdip;
7925 	int			held;
7926 
7927 	cdip = vlun->svl_dip;
7928 	vdip = ddi_get_parent(cdip);
7929 
7930 	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
7931 
7932 	/*
7933 	 * Perform a final check to see if the active path class is indeed
7934 	 * not the preferred path class.  In the time since the auto failback
7935 	 * was dispatched, an external failover could have been detected
7936 	 * (some other host could have detected this condition and triggered
7937 	 * the auto failback already).
7938 	 * In such a case, going ahead with the failover would negate the
7939 	 * whole purpose of auto failback.
7940 	 */
7941 	mutex_enter(&vlun->svl_mutex);
7942 	if (vlun->svl_active_pclass != NULL) {
7943 		char				*best_pclass;
7944 		struct scsi_failover_ops	*fo;
7945 
7946 		fo = vlun->svl_fops;
7947 
7948 		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
7949 		    vlun->svl_fops_ctpriv);
7950 		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
7951 			mutex_exit(&vlun->svl_mutex);
7952 			VHCI_RELEASE_LUN(vlun);
7953 			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
7954 			    "auto failback for %s as %s pathclass already "
7955 			    "active.\n", vlun->svl_lun_wwn, best_pclass));
7956 			return;
7957 		}
7958 	}
7959 	mutex_exit(&vlun->svl_mutex);
7960 	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
7961 	    == MDI_SUCCESS) {
7962 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7963 		    "succeeded for device %s (GUID %s)",
7964 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7965 	} else {
7966 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7967 		    "failed for device %s (GUID %s)",
7968 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7969 	}
7970 	VHCI_RELEASE_LUN(vlun);
7971 }
7972 
7973 #ifdef DEBUG
7974 static void
7975 vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
7976 {
7977 	vhci_clean_print(NULL, 5, "Current PGR Keys",
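	/* Each PGR key is MHIOC_RESV_KEY_SIZE (8) bytes. */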
7978 	    (uchar_t *)prin, numkeys * 8);
7979 }
7980 #endif
7981 
7982 static void
7983 vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
7984 {
7985 	int			i;
7986 	vhci_prout_t		*prout;
7987 	char			buf1[4 * MHIOC_RESV_KEY_SIZE + 1];
7988 	char			buf2[4 * MHIOC_RESV_KEY_SIZE + 1];
7989 	char			buf3[4 * MHIOC_RESV_KEY_SIZE + 1];
7990 	char			buf4[4 * MHIOC_RESV_KEY_SIZE + 1];
7991 
7992 	prout = &vlun->svl_prout;
7993 
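	/* Each key byte prints as "[xx]" (four chars), hence the 4x stride. */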
7994 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7995 		(void) sprintf(&buf1[4 * i], "[%02x]", prout->res_key[i]);
7996 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7997 		(void) sprintf(&buf2[(4 * i)], "[%02x]", prout->service_key[i]);
7998 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7999 		(void) sprintf(&buf3[4 * i], "[%02x]",
8000 		    prout->active_res_key[i]);
8001 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8002 		(void) sprintf(&buf4[4 * i], "[%02x]",
8003 		    prout->active_service_key[i]);
8004 
8005 	/* Print everything in one go; otherwise the output gets jumbled. */
8006 	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
8007 	    "res_key           : %s\n"
8008 	    "service_key       : %s\n"
8009 	    "active_res_key    : %s\n"
8010 	    "active_service_key: %s\n",
8011 	    msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
8012 }
8013 
8014 /*
8015  * Called from vhci_scsi_start to update the pHCI pkt with target packet.
8016  */
8017 static void
8018 vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
8019 {
8020 
8021 	ASSERT(vpkt->vpkt_hba_pkt);
8022 
8023 	vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
8024 	vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;
8025 
8026 	if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
8027 	    MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
8028 		/*
8029 		 * A polled command was requested or the HBA is in the
8030 		 * suspended state.
8031 		 */
8032 		vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
8033 		vpkt->vpkt_hba_pkt->pkt_comp = NULL;
8034 	} else {
8035 		vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
8036 	}
8037 	vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
8038 	bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
8039 	    vpkt->vpkt_tgt_init_cdblen);
8040 	vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;
8041 
8042 	/* Re-initialize the following pHCI packet state information */
8043 	vpkt->vpkt_hba_pkt->pkt_state = 0;
8044 	vpkt->vpkt_hba_pkt->pkt_statistics = 0;
8045 	vpkt->vpkt_hba_pkt->pkt_reason = 0;
8046 }
8047 
8048 static int
8049 vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
8050     void *arg, void *result)
8051 {
8052 	int ret = DDI_SUCCESS;
8053 
8054 	/*
8055 	 * Generic processing in MPxIO framework
8056 	 */
8057 	ret = mdi_bus_power(parent, impl_arg, op, arg, result);
8058 
8059 	switch (ret) {
8060 	case MDI_SUCCESS:
8061 		ret = DDI_SUCCESS;
8062 		break;
8063 	case MDI_FAILURE:
8064 		ret = DDI_FAILURE;
8065 		break;
8066 	default:
8067 		break;
8068 	}
8069 
8070 	return (ret);
8071 }
8072 
8073 static int
8074 vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
8075     mdi_pathinfo_t *pip)
8076 {
8077 	dev_info_t		*cdip;
8078 	mdi_pathinfo_t		*npip = NULL;
8079 	scsi_vhci_priv_t	*svp = NULL;
8080 	struct scsi_address	*pap = NULL;
8081 	scsi_hba_tran_t		*hba = NULL;
8082 	int			sps;
8083 	int			mps_flag;
8084 	int			rval = 0;
8085 
8086 	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
8087 	if (pip) {
8088 		/*
8089 		 * If the call is from vhci_pathinfo_state_change,
8090 		 * then this path was busy and is becoming ready to accept IO.
8091 		 */
8092 		ASSERT(ap != NULL);
8093 		hba = ap->a_hba_tran;
8094 		ASSERT(hba != NULL);
8095 		rval = scsi_ifsetcap(ap, cap, val, whom);
8096 
8097 		VHCI_DEBUG(2, (CE_NOTE, NULL,
8098 		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
8099 		    (void *)pip, (void *)ap, rval));
8100 
8101 		return (rval);
8102 	}
8103 
8104 	/*
8105 	 * Set capability on all the pHCIs.
8106 	 * If any path is busy, then the capability would be set by
8107 	 * vhci_pathinfo_state_change.
8108 	 */
8109 
8110 	cdip = ADDR2DIP(ap);
8111 	ASSERT(cdip != NULL);
8112 	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
8113 	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
8114 		VHCI_DEBUG(2, (CE_WARN, NULL,
8115 		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
8116 		    (void *)cdip));
8117 		return (0);
8118 	}
8119 
8120 again:
8121 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
8122 	if (svp == NULL) {
8123 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
8124 		    "priv is NULL, pip 0x%p", (void *)pip));
8125 		mdi_rele_path(pip);
8126 		return (rval);
8127 	}
8128 
8129 	if (svp->svp_psd == NULL) {
8130 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
8131 		    "psd is NULL, pip 0x%p, svp 0x%p",
8132 		    (void *)pip, (void *)svp));
8133 		mdi_rele_path(pip);
8134 		return (rval);
8135 	}
8136 
8137 	pap = &svp->svp_psd->sd_address;
8138 	ASSERT(pap != NULL);
8139 	hba = pap->a_hba_tran;
8140 	ASSERT(hba != NULL);
8141 
8142 	if (hba->tran_setcap != NULL) {
8143 		rval = scsi_ifsetcap(pap, cap, val, whom);
8144 
8145 		VHCI_DEBUG(2, (CE_NOTE, NULL,
8146 		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
8147 		    (void *)pip, (void *)ap, rval));
8148 
8149 		/*
8150 		 * Select next path and issue the setcap, repeat
8151 		 * until all paths are exhausted
8152 		 */
8153 		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
8154 		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
8155 			mdi_rele_path(pip);
8156 			return (1);
8157 		}
8158 		mdi_rele_path(pip);
8159 		pip = npip;
8160 		goto again;
8161 	}
8162 	mdi_rele_path(pip);
8163 	return (rval);
8164 }
8165 
8166 static int
8167 vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8168     void *arg, dev_info_t **child)
8169 {
8170 	char *guid;
8171 
8172 	if (vhci_bus_config_debug)
8173 		flags |= NDI_DEVI_DEBUG;
8174 
8175 	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
8176 		guid = vhci_devnm_to_guid((char *)arg);
8177 	else
8178 		guid = NULL;
8179 
8180 	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
8181 	    == MDI_SUCCESS)
8182 		return (NDI_SUCCESS);
8183 	else
8184 		return (NDI_FAILURE);
8185 }
8186 
8187 static int
8188 vhci_scsi_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8189     void *arg)
8190 {
8191 	if (vhci_bus_config_debug)
8192 		flags |= NDI_DEVI_DEBUG;
8193 
8194 	return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
8195 }
8196 
8197 /*
8198  * Take the original vhci_pkt, create a duplicate of the pkt for resending
8199  * as though it originated in ssd.
8200  */
8201 static struct scsi_pkt *
8202 vhci_create_retry_pkt(struct vhci_pkt *vpkt)
8203 {
8204 	struct vhci_pkt *new_vpkt = NULL;
8205 	struct scsi_pkt	*pkt = NULL;
8206 
8207 	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
8208 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8209 
8210 	/*
8211 	 * Ensure consistent data at completion time by setting PKT_CONSISTENT
8212 	 */
8213 	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
8214 	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
8215 	    vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
8216 	if (pkt != NULL) {
8217 		new_vpkt = TGTPKT2VHCIPKT(pkt);
8218 
8219 		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
8220 		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
8221 		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
8222 		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;
8223 
8224 		pkt->pkt_resid = 0;
8225 		pkt->pkt_statistics = 0;
8226 		pkt->pkt_reason = 0;
8227 
8228 		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
8229 		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);
8230 
8231 		/*
8232 		 * Save a pointer to the original vhci_pkt
8233 		 */
8234 		new_vpkt->vpkt_org_vpkt = vpkt;
8235 	}
8236 
8237 	return (pkt);
8238 }
8239 
8240 /*
8241  * Copy the successful completion information from the hba packet into
8242  * the original target pkt from the upper layer.  Returns the original
8243  * vpkt and destroys the new vpkt from the internal retry.
8244  */
8245 static struct vhci_pkt *
8246 vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
8247 {
8248 	struct vhci_pkt		*ret_vpkt = NULL;
8249 	struct scsi_pkt		*tpkt = NULL;
8250 	struct scsi_pkt		*hba_pkt = NULL;
8251 	scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
8252 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8253 
8254 	ASSERT(vpkt->vpkt_org_vpkt != NULL);
8255 	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
8256 	    "completed successfully!\n"));
8257 
8258 	ret_vpkt = vpkt->vpkt_org_vpkt;
8259 	tpkt = ret_vpkt->vpkt_tgt_pkt;
8260 	hba_pkt = vpkt->vpkt_hba_pkt;
8261 
8262 	/*
8263 	 * Copy the good status into the target driver's packet
8264 	 */
8265 	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
8266 	tpkt->pkt_resid = hba_pkt->pkt_resid;
8267 	tpkt->pkt_state = hba_pkt->pkt_state;
8268 	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
8269 	tpkt->pkt_reason = hba_pkt->pkt_reason;
8270 
8271 	/*
8272 	 * Destroy the internally created vpkt for the retry
8273 	 */
8274 	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
8275 	    vpkt->vpkt_tgt_pkt);
8276 
8277 	return (ret_vpkt);
8278 }
8279 
8280 /* restart the request sense request */
8281 static void
8282 vhci_uscsi_restart_sense(void *arg)
8283 {
8284 	struct buf	*rqbp;
8285 	struct buf	*bp;
8286 	struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
8287 	mp_uscsi_cmd_t	*mp_uscmdp;
8288 
8289 	VHCI_DEBUG(4, (CE_WARN, NULL,
8290 	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));
8291 
8292 	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
8293 		/* if it fails - need to wakeup the original command */
8294 		mp_uscmdp = rqpkt->pkt_private;
8295 		bp = mp_uscmdp->cmdbp;
8296 		rqbp = mp_uscmdp->rqbp;
8297 		ASSERT(mp_uscmdp && bp && rqbp);
8298 		scsi_free_consistent_buf(rqbp);
8299 		scsi_destroy_pkt(rqpkt);
8300 		bp->b_resid = bp->b_bcount;
8301 		bioerror(bp, EIO);
8302 		biodone(bp);
8303 	}
8304 }
8305 
8306 /*
8307  * auto-rqsense is not enabled so we have to retrieve the request sense
8308  * manually.
8309  */
8310 static int
8311 vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
8312 {
8313 	struct buf		*rqbp, *cmdbp;
8314 	struct scsi_pkt		*rqpkt;
8315 	int			rval = 0;
8316 
8317 	cmdbp = mp_uscmdp->cmdbp;
8318 	ASSERT(cmdbp != NULL);
8319 
8320 	VHCI_DEBUG(4, (CE_WARN, NULL,
8321 	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
8322 	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
8323 	/* set up the packet information and cdb */
8324 	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
8325 	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
8326 		return (-1);
8327 	}
8328 
8329 	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
8330 	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
8331 		scsi_free_consistent_buf(rqbp);
8332 		return (-1);
8333 	}
8334 
8335 	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
8336 	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
8337 
8338 	mp_uscmdp->rqbp = rqbp;
8339 	rqbp->b_private = mp_uscmdp;
8340 	rqpkt->pkt_flags |= FLAG_SENSING;
8341 	rqpkt->pkt_time = 60;
8342 	rqpkt->pkt_comp = vhci_uscsi_iodone;
8343 	rqpkt->pkt_private = mp_uscmdp;
8344 
8345 	/*
8346 	 * NOTE: This code path is related to MPAPI uscsi(4I), so path
8347 	 * selection is not based on path_instance.
8348 	 */
8349 	if (scsi_pkt_allocated_correctly(rqpkt))
8350 		rqpkt->pkt_path_instance = 0;
8351 
8352 	switch (scsi_transport(rqpkt)) {
8353 	case TRAN_ACCEPT:
8354 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8355 		    "transport accepted."));
8356 		break;
8357 	case TRAN_BUSY:
8358 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8359 		    "transport busy, setting timeout."));
8360 		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
8361 		    (drv_usectohz(5 * 1000000)));
8362 		break;
8363 	default:
8364 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8365 		    "transport failed"));
8366 		scsi_free_consistent_buf(rqbp);
8367 		scsi_destroy_pkt(rqpkt);
8368 		rval = -1;
8369 	}
8370 
8371 	return (rval);
8372 }
8373 
8374 /*
8375  * Completion routine for the MPAPI uscsi command.  This behaves as though
8376  * FLAG_DIAGNOSE is set, meaning there are no retries except for a manual
8377  * request sense.
8378  */
8379 void
8380 vhci_uscsi_iodone(struct scsi_pkt *pkt)
8381 {
8382 	struct buf			*bp;
8383 	mp_uscsi_cmd_t			*mp_uscmdp;
8384 	struct uscsi_cmd		*uscmdp;
8385 	struct scsi_arq_status		*arqstat;
8386 	int				err;
8387 
8388 	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
8389 	uscmdp = mp_uscmdp->uscmdp;
8390 	bp = mp_uscmdp->cmdbp;
8391 	ASSERT(bp != NULL);
8392 	VHCI_DEBUG(4, (CE_WARN, NULL,
8393 	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
8394 	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
8395 	/* Save the status and the residual into the uscsi_cmd struct */
8396 	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
8397 	uscmdp->uscsi_resid = bp->b_resid;
8398 
8399 	/* return early on a completely successful command */
8400 	if (pkt->pkt_reason == CMD_CMPLT &&
8401 	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
8402 	    pkt->pkt_resid == 0) {
8403 		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8404 		scsi_destroy_pkt(pkt);
8405 		biodone(bp);
8406 		return;
8407 	}
8408 	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
8409 	    " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
8410 	    pkt->pkt_reason, pkt->pkt_resid,
8411 	    pkt->pkt_state, bp->b_bcount, bp->b_resid));
8412 
8413 	err = EIO;
8414 
8415 	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
8416 	if (pkt->pkt_reason != CMD_CMPLT) {
8417 		/*
8418 		 * The command did not complete.
8419 		 */
8420 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8421 		    "vhci_uscsi_iodone: command did not complete."
8422 		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
8423 		if (pkt->pkt_flags & FLAG_SENSING) {
8424 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8425 		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
8426 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
8427 			err = ETIMEDOUT;
8428 		}
8429 	} else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
8430 		/*
8431 		 * The auto-rqsense happened, and the packet has a filled-in
8432 		 * scsi_arq_status structure, pointed to by pkt_scbp.
8433 		 */
8434 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8435 		    "vhci_uscsi_iodone: received auto-requested sense"));
8436 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8437 			/* get the amount of data to copy into rqbuf */
8438 			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
8439 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8440 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8441 			uscmdp->uscsi_rqstatus =
8442 			    *((char *)&arqstat->sts_rqpkt_status);
8443 			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
8444 			    rqlen != 0) {
8445 				bcopy(&(arqstat->sts_sensedata),
8446 				    uscmdp->uscsi_rqbuf, rqlen);
8447 			}
8448 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8449 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8450 			    "vhci_uscsi_iodone: ARQ "
8451 			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
8452 			    "xfer: %d rqpkt_resid: %d\n",
8453 			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
8454 			    uscmdp->uscsi_rqlen, rqlen,
8455 			    arqstat->sts_rqpkt_resid));
8456 		}
8457 	} else if (pkt->pkt_flags & FLAG_SENSING) {
8458 		struct buf *rqbp;
8459 		struct scsi_status *rqstatus;
8460 
8461 		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
8462 		/* a manual request sense was done - get the information */
8463 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8464 			int rqlen = SENSE_LENGTH - pkt->pkt_resid;
8465 
8466 			rqbp = mp_uscmdp->rqbp;
8467 			/* get the amount of data to copy into rqbuf */
8468 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8469 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8470 			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
8471 			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
8472 				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
8473 				    rqlen);
8474 			}
8475 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8476 			scsi_free_consistent_buf(rqbp);
8477 		}
8478 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING "
8479 		    "uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
8480 		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
8481 	} else {
8482 		struct scsi_status *status =
8483 		    (struct scsi_status *)pkt->pkt_scbp;
8484 		/*
8485 		 * Command completed and we're not getting sense. Check for
8486 		 * errors and decide what to do next.
8487 		 */
8488 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8489 		    "vhci_uscsi_iodone: command appears complete: reason: %x",
8490 		    pkt->pkt_reason));
8491 		if (status->sts_chk) {
8492 			/* need to manually get the request sense */
8493 			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
8494 				scsi_destroy_pkt(pkt);
8495 				return;
8496 			}
8497 		} else {
8498 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8499 			    "vhci_chk_err: appears complete"));
8500 			err = 0;
8501 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8502 			if (pkt->pkt_resid) {
8503 				bp->b_resid += pkt->pkt_resid;
8504 			}
8505 		}
8506 	}
8507 
8508 	if (err) {
8509 		if (bp->b_resid == 0)
8510 			bp->b_resid = bp->b_bcount;
8511 		bioerror(bp, err);
8512 		bp->b_flags |= B_ERROR;
8513 	}
8514 
8515 	scsi_destroy_pkt(pkt);
8516 	biodone(bp);
8517 
8518 	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
8519 }
8520 
8521 /*
8522  * start routine for the mpapi uscsi command
8523  */
8524 int
8525 vhci_uscsi_iostart(struct buf *bp)
8526 {
8527 	struct scsi_pkt		*pkt;
8528 	struct uscsi_cmd	*uscmdp;
8529 	mp_uscsi_cmd_t		*mp_uscmdp;
8530 	int			stat_size, rval;
8531 	int			retry = 0;
8532 
8533 	ASSERT(bp->b_private != NULL);
8534 
8535 	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
8536 	uscmdp = mp_uscmdp->uscmdp;
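	/*
	 * Size the status area: leave room for sense data when
	 * USCSI_RQENABLE is set, a single status byte otherwise.
	 */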
8537 	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8538 		stat_size = SENSE_LENGTH;
8539 	} else {
8540 		stat_size = 1;
8541 	}
8542 
8543 	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
8544 	    stat_size, 0, 0, SLEEP_FUNC, NULL);
8545 	if (pkt == NULL) {
8546 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8547 		    "vhci_uscsi_iostart: rval: EINVAL"));
8548 		bp->b_resid = bp->b_bcount;
8549 		uscmdp->uscsi_resid = bp->b_bcount;
8550 		bioerror(bp, EINVAL);
8551 		biodone(bp);
8552 		return (EINVAL);
8553 	}
8554 
8555 	pkt->pkt_time = uscmdp->uscsi_timeout;
8556 	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
8557 	pkt->pkt_comp = vhci_uscsi_iodone;
8558 	pkt->pkt_private = mp_uscmdp;
8559 	if (uscmdp->uscsi_flags & USCSI_SILENT)
8560 		pkt->pkt_flags |= FLAG_SILENT;
8561 	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
8562 		pkt->pkt_flags |= FLAG_ISOLATE;
8563 	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
8564 		pkt->pkt_flags |= FLAG_DIAGNOSE;
8565 	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
8566 		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
8567 	}
8568 	VHCI_DEBUG(4, (CE_WARN, NULL,
8569 	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
8570 	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
8571 	    " stat_size: %d",
8572 	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
8573 	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
8574 	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));
8575 
8576 	/*
8577 	 * NOTE: This code path is related to MPAPI uscsi(4I), so path
8578 	 * selection is not based on path_instance.
8579 	 */
8580 	if (scsi_pkt_allocated_correctly(pkt))
8581 		pkt->pkt_path_instance = 0;
8582 
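	/*
	 * On TRAN_BUSY, back off vhci_uscsi_delay microseconds between
	 * attempts, up to vhci_uscsi_retry_count tries.
	 */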
8583 	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
8584 	    retry < vhci_uscsi_retry_count) {
8585 		delay(drv_usectohz(vhci_uscsi_delay));
8586 		retry++;
8587 	}
8588 	if (retry >= vhci_uscsi_retry_count) {
8589 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8590 		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
8591 	}
8592 	switch (rval) {
8593 	case TRAN_ACCEPT:
8594 		rval = 0;
8595 		break;
8596 
8597 	default:
8598 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8599 		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
8600 		    rval, bp->b_bcount, bp->b_resid));
8601 		bp->b_resid = bp->b_bcount;
8602 		uscmdp->uscsi_resid = bp->b_bcount;
8603 		bioerror(bp, EIO);
8604 		scsi_destroy_pkt(pkt);
8605 		biodone(bp);
8606 		rval = EIO;
8607 		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8608 		break;
8609 	}
8610 	VHCI_DEBUG(4, (CE_NOTE, NULL,
8611 	    "vhci_uscsi_iostart: exit: rval: %d", rval));
8612 	return (rval);
8613 }
8614 
8615 /* ARGSUSED */
8616 static struct scsi_failover_ops *
8617 vhci_dev_fo(dev_info_t *vdip, struct scsi_device *psd,
8618     void **ctprivp, char **fo_namep)
8619 {
8620 	struct scsi_failover_ops	*sfo;
8621 	char				*sfo_name;
8622 	char				*override;
8623 	struct scsi_failover		*sf;
8624 
8625 	ASSERT(psd && psd->sd_inq);
8626 	if ((psd == NULL) || (psd->sd_inq == NULL)) {
8627 		VHCI_DEBUG(1, (CE_NOTE, NULL,
8628 		    "!vhci_dev_fo:return NULL no scsi_device or inquiry"));
8629 		return (NULL);
8630 	}
8631 
8632 	/*
8633 	 * Determine if device is supported under scsi_vhci, and select
8634 	 * failover module.
8635 	 *
8636 	 * See if there is a scsi_vhci.conf file override for this device's
8637 	 * VID/PID. The following values can be returned:
8638 	 *
8639 	 * NULL		If NULL is returned then there is no scsi_vhci.conf
8640 	 *		override.  For NULL, we determine the failover_ops for
8641 	 *		this device by checking the sfo_device_probe entry
8642 	 *		point for each 'fops' module, in order.
8643 	 *
8644 	 *		NOTE: Correct operation may depend on module ordering
8645 	 *		of 'specific' (failover modules that are completely
8646 	 *		VID/PID table based) to 'generic' (failover modules
8647 	 *		that are based on T10 standards like TPGS).  Currently,
8648 	 *		the value of 'ddi-forceload' in scsi_vhci.conf is used
8649 	 *		to establish the module list and probe order.
8650 	 *
8651 	 * "NONE"	If the value "NONE" is returned then there is a
8652 	 *		scsi_vhci.conf VID/PID override to indicate the device
8653 	 *		should not be supported under scsi_vhci (even if there
8654 	 *		is an 'fops' module supporting the device).
8655 	 *
8656 	 * "<other>"	If another value is returned then that value is the
8657 	 *		name of the 'fops' module that should be used.
8658 	 */
	sfo = NULL;	/* "NONE" */
	override = scsi_get_device_type_string(
	    "scsi-vhci-failover-override", vdip, psd);
	if (override == NULL) {
		/* NULL: default: select based on sfo_device_probe results */
		for (sf = scsi_failover_table; sf->sf_mod; sf++) {
			if ((sf->sf_sfo == NULL) ||
			    sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
			    ctprivp) == SFO_DEVICE_PROBE_PHCI)
				continue;

			/* found failover module, supported under scsi_vhci */
			sfo = sf->sf_sfo;
			if (fo_namep && (*fo_namep == NULL)) {
				sfo_name = i_ddi_strdup(sfo->sfo_name,
				    KM_SLEEP);
				*fo_namep = sfo_name;
			}
			break;
		}
	} else if (strcasecmp(override, "NONE")) {
		/* !"NONE": select based on driver.conf specified name */
		for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
			if ((sf->sf_sfo == NULL) ||
			    (sf->sf_sfo->sfo_name == NULL) ||
			    strcmp(override, sf->sf_sfo->sfo_name))
				continue;

			/*
			 * NOTE: If sfo_device_probe() has side effects,
			 * including setting *ctprivp, those side effects
			 * will not occur when the module is selected by
			 * an override.
			 */

			/* found failover module, supported under scsi_vhci */
			sfo = sf->sf_sfo;
			if (fo_namep && (*fo_namep == NULL)) {
				/*
				 * Prefix the name with "conf " to record
				 * that the module was selected by a
				 * driver.conf override rather than by probe.
				 */
				sfo_name = kmem_alloc(strlen("conf ") +
				    strlen(sfo->sfo_name) + 1, KM_SLEEP);
				(void) sprintf(sfo_name, "conf %s",
				    sfo->sfo_name);
				*fo_namep = sfo_name;
			}
			break;
		}
	}
	if (override)
		kmem_free(override, strlen(override) + 1);
	return (sfo);
}

/*
 * Determine whether the device described by cinfo should be enumerated
 * under the vHCI or the pHCI - if there is a failover ops then the
 * device is supported under the vHCI.  By agreement with SCSA, cinfo is
 * a pointer to a scsi_device structure associated with a decorated pHCI
 * probe node.
 */
/* ARGSUSED */
int
vhci_is_dev_supported(dev_info_t *vdip, dev_info_t *pdip, void *cinfo)
{
	struct scsi_device	*psd = (struct scsi_device *)cinfo;

	return (vhci_dev_fo(vdip, psd, NULL, NULL) ? MDI_SUCCESS : MDI_FAILURE);
}

#ifdef DEBUG
extern struct scsi_key_strings scsi_cmds[];

static char *
vhci_print_scsi_cmd(char cmd)
{
	char tmp[64];
	char *cpnt;

	cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
	/*
	 * If scsi_cmd_name() used the local scratch buffer, tmp would go
	 * out of scope on return and the caller would see garbage, so
	 * return a static string instead.
	 */
	if (cpnt == tmp) {
		cpnt = "Unknown Command";
	}
	return (cpnt);
}

extern uchar_t	scsi_cdb_size[];

static void
vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
{
	int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
	char buf[256];

	if (level == CE_NOTE) {
		vhci_log(level, dip, "path cmd %s\n",
		    vhci_print_scsi_cmd(*cdb));
		return;
	}

	(void) snprintf(buf, sizeof (buf), "%s for cmd(%s)", title,
	    vhci_print_scsi_cmd(*cdb));
	vhci_clean_print(dip, level, buf, cdb, len);
}

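/*
 * Hex-dump 'len' bytes of 'data' preceded by 'title', eight bytes per
 * line.  For a six-byte CDB the output looks like (bytes illustrative):
 *
 *	<title>:
 *	0x12 0x00 0x00 0x00 0x24 0x00
 */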
static void
vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
    int len)
{
	int	i;
	int	c;
	char	buf[256];
	uchar_t	byte;

	(void) snprintf(buf, sizeof (buf), "%s:\n", title);
	vhci_log(level, dev, "%s", buf);
	level = CE_CONT;
	for (i = 0; i < len; ) {
		buf[0] = 0;
		for (c = 0; c < 8 && i < len; c++, i++) {
			byte = (uchar_t)data[i];
			/* two zero-padded hex digits per byte */
			(void) sprintf(&buf[strlen(buf)], "0x%02x ", byte);
		}
		(void) sprintf(&buf[strlen(buf)], "\n");

		vhci_log(level, dev, "%s\n", buf);
	}
}
#endif
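
/*
 * Mark the cached MPAPI multipath-LU object whose name matches the WWN
 * of 'vlun' as invalid, so stale data is not handed back through the
 * MPAPI interfaces.
 */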
static void
vhci_invalidate_mpapi_lu(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun)
{
	char			*svl_wwn;
	mpapi_item_list_t	*ilist;
	mpapi_lu_data_t		*ld;

	if (vlun == NULL)
		return;
	svl_wwn = vlun->svl_lun_wwn;

	ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head;

	while (ilist != NULL) {
		ld = (mpapi_lu_data_t *)(ilist->item->idata);
		if ((ld != NULL) && (strncmp(ld->prop.name, svl_wwn,
		    strlen(svl_wwn)) == 0)) {
			ld->valid = 0;
			VHCI_DEBUG(6, (CE_WARN, NULL,
			    "vhci_invalidate_mpapi_lu: "
			    "Invalidated LU(%s)", svl_wwn));
			return;
		}
		ilist = ilist->next;
	}
	VHCI_DEBUG(6, (CE_WARN, NULL, "vhci_invalidate_mpapi_lu: "
	    "Could not find LU(%s) to invalidate.", svl_wwn));
}