xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c (revision 67dbe2be0c0f1e2eb428b89088bb5667e8f0b9f6)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Multiplexed I/O SCSI vHCI implementation
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/scsi/impl/services.h>
#include <sys/sunmdi.h>
#include <sys/mdi_impldefs.h>
#include <sys/scsi/adapters/scsi_vhci.h>
#include <sys/disp.h>
#include <sys/byteorder.h>

extern uintptr_t scsi_callback_id;
extern ddi_dma_attr_t scsi_alloc_attr;

#ifdef	DEBUG
int	vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
#endif

/* retry for the vhci_do_prout command when a not ready is returned */
int vhci_prout_not_ready_retry = 180;

/*
 * These values are defined to support the internal retry of
 * SCSI packets for better sense code handling.
 */
#define	VHCI_CMD_CMPLT	0
#define	VHCI_CMD_RETRY	1
#define	VHCI_CMD_ERROR	-1

#define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
#define	VHCI_SCSI_PERR		0x47
#define	VHCI_PGR_ILLEGALOP	-2
#define	VHCI_NUM_UPDATE_TASKQ	8
/* changed to 132 to accommodate HDS */

/*
 * Version Macros
 */
#define	VHCI_NAME_VERSION	"SCSI VHCI Driver"
char		vhci_version_name[] = VHCI_NAME_VERSION;

int		vhci_first_time = 0;
clock_t		vhci_to_ticks = 0;
int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
kcondvar_t	vhci_cv;
kmutex_t	vhci_global_mutex;
void		*vhci_softstate = NULL; /* for soft state */

/*
 * Flag to delay the retry of the reserve command
 */
int		vhci_reserve_delay = 100000;
static int	vhci_path_quiesce_timeout = 60;
static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];

/* uscsi delay for a TRAN_BUSY */
static int vhci_uscsi_delay = 100000;
static int vhci_uscsi_retry_count = 180;
/* uscsi_restart_sense timeout id in case it needs to get canceled */
static timeout_id_t vhci_restart_timeid = 0;

static int	vhci_bus_config_debug = 0;

/*
 * Bidirectional map of 'target-port' to port id <pid> for support of
 * iostat(1M) '-Xx' and '-Yx' output.
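 *
 * For example (hypothetical values), the 'target-port' string
 * "naa.5000c50012345678" would map to pid 1 in vhci_targetmap_byport,
 * while vhci_targetmap_bypid maps 1 back to the same string.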
 */
static kmutex_t		vhci_targetmap_mutex;
static uint_t		vhci_targetmap_pid = 1;
static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */

/*
 * functions exported by scsi_vhci struct cb_ops
 */
static int vhci_open(dev_t *, int, int, cred_t *);
static int vhci_close(dev_t, int, int, cred_t *);
static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * functions exported by scsi_vhci struct dev_ops
 */
static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * functions exported by scsi_vhci scsi_hba_tran_t transport table
 */
static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
    struct scsi_device *);
static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset(struct scsi_address *, int);
static int vhci_scsi_reset_target(struct scsi_address *, int level,
    uint8_t select_path);
static int vhci_scsi_reset_bus(struct scsi_address *);
static int vhci_scsi_getcap(struct scsi_address *, char *, int);
static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip);
static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t),
    caddr_t);
static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int vhci_scsi_get_name(struct scsi_device *, char *, int);
static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
    void *, void *);
static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *, dev_info_t **);
static int vhci_scsi_bus_unconfig(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *);
static struct scsi_failover_ops *vhci_dev_fo(dev_info_t *, struct scsi_device *,
    void **, char **);

/*
 * functions registered with the mpxio framework via mdi_vhci_ops_t
 */
static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
		mdi_pathinfo_state_t, uint32_t, int);
static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_failover(dev_info_t *, dev_info_t *, int);
static void vhci_client_attached(dev_info_t *);
static int vhci_is_dev_supported(dev_info_t *, dev_info_t *, void *);

static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
    int, caddr_t);
static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
    uint_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
    sv_iocdata_t *, int, caddr_t);
static int vhci_handle_ext_fo(struct scsi_pkt *, int);
static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
static int vhci_quiesce_lun(struct scsi_vhci_lun *);
static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
static void vhci_dispatch_scsi_start(void *);
static void vhci_efo_done(void *);
static void vhci_initiate_auto_failback(void *);
static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
    struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
    scsi_vhci_lun_t *, char *, char *);

static char *vhci_devnm_to_guid(char *);
static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
    int, int (*func)(caddr_t));
static void vhci_intr(struct scsi_pkt *);
static int vhci_do_prout(scsi_vhci_priv_t *);
static void vhci_run_cmd(void *);
static int vhci_do_prin(struct vhci_pkt **);
static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
static void vhci_lun_free(dev_info_t *);
static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
    uint8_t, uint8_t);
void vhci_update_pathstates(void *);

#ifdef DEBUG
static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
static void vhci_print_cdb(dev_info_t *dip, uint_t level,
    char *title, uchar_t *cdb);
static void vhci_clean_print(dev_info_t *dev, uint_t level,
    char *title, uchar_t *data, int len);
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
static void vhci_uscsi_iodone(struct scsi_pkt *pkt);
static void vhci_invalidate_mpapi_lu(struct scsi_vhci *, scsi_vhci_lun_t *);

/*
 * MP-API related functions
 */
extern int vhci_mpapi_init(struct scsi_vhci *);
extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern void vhci_update_mpapi_data(struct scsi_vhci *,
    scsi_vhci_lun_t *, mdi_pathinfo_t *);
extern void* vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
    uint8_t, void*);
extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
    scsi_vhci_lun_t *);

#define	VHCI_DMA_MAX_XFER_CAP	INT_MAX

#define	VHCI_MAX_PGR_RETRIES	3

/*
 * Macros for the device-type mpxio options
 */
#define	LOAD_BALANCE_OPTIONS		"load-balance-options"
#define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
#define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
#define	DEVICE_TYPE_STR			"device-type"
#define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')

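/*
 * Illustrative scsi_vhci.conf fragment using the properties named above
 * (the device type and option values are examples only):
 *
 *	device-type-mpxio-options-list =
 *	    "device-type=SUN     SENA", "load-balance-options=lb-options";
 *	lb-options = "load-balance=logical-block", "region-size=18";
 */
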
static struct cb_ops vhci_cb_ops = {
	vhci_open,			/* open */
	vhci_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	vhci_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops vhci_ops = {
	DEVO_REV,
	0,
	vhci_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	vhci_attach,		/* attach and detach are mandatory */
	vhci_detach,
	nodev,			/* reset */
	&vhci_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	vhci_version_name,	/* module name */
	&vhci_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static mdi_vhci_ops_t vhci_opinfo = {
	MDI_VHCI_OPS_REV,
	vhci_pathinfo_init,		/* Pathinfo node init callback */
	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
	vhci_pathinfo_state_change,	/* Pathinfo node state change */
	vhci_failover,			/* failover callback */
	vhci_client_attached,		/* client attached callback	*/
	vhci_is_dev_supported		/* is device supported by mdi */
};

/*
 * The scsi_failover table defines an ordered set of 'fops' modules supported
 * by scsi_vhci.  Currently, this table is initialized from the 'ddi-forceload'
 * property specified in scsi_vhci.conf.
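 *
 * Illustrative 'ddi-forceload' fragment (the module list is an example
 * only):
 *
 *	ddi-forceload =
 *	    "misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *	    "misc/scsi_vhci/scsi_vhci_f_sym",
 *	    "misc/scsi_vhci/scsi_vhci_f_tpgs";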
 */
static struct scsi_failover {
	ddi_modhandle_t			sf_mod;
	struct scsi_failover_ops	*sf_sfo;
} *scsi_failover_table;
static uint_t	scsi_nfailover;

int
_init(void)
{
	int	rval;

	/*
	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
	 * before registering with the transport.
	 */
	if ((rval = ddi_soft_state_init(&vhci_softstate,
	    sizeof (struct scsi_vhci), 1)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: soft state init failed\n"));
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: scsi hba init failed\n"));
		ddi_soft_state_fini(&vhci_softstate);
		return (rval);
	}

	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
	vhci_targetmap_byport = mod_hash_create_strhash(
	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
	vhci_targetmap_bypid = mod_hash_create_idhash(
	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);

	if ((rval = mod_install(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
		if (vhci_targetmap_bypid)
			mod_hash_destroy_idhash(vhci_targetmap_bypid);
		if (vhci_targetmap_byport)
			mod_hash_destroy_strhash(vhci_targetmap_byport);
		mutex_destroy(&vhci_targetmap_mutex);
		cv_destroy(&vhci_cv);
		mutex_destroy(&vhci_global_mutex);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&vhci_softstate);
	}
	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked  -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
		return (rval);
	}

	if (vhci_targetmap_bypid)
		mod_hash_destroy_idhash(vhci_targetmap_bypid);
	if (vhci_targetmap_byport)
		mod_hash_destroy_strhash(vhci_targetmap_byport);
	mutex_destroy(&vhci_targetmap_mutex);
	cv_destroy(&vhci_cv);
	mutex_destroy(&vhci_global_mutex);
	scsi_hba_fini(&modlinkage);
	ddi_soft_state_fini(&vhci_softstate);

	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Lookup scsi_failover by "short name" of failover module.
 */
struct scsi_failover_ops *
vhci_failover_ops_by_name(char *name)
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo == NULL)
			continue;
		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
			return (sf->sf_sfo);
	}
	return (NULL);
}

/*
 * Load all scsi_failover_ops 'fops' modules.
 */
static void
vhci_failover_modopen(struct scsi_vhci *vhci)
{
	char			**module;
	int			i;
	struct scsi_failover	*sf;
	char			**dt;
	int			e;

	if (scsi_failover_table)
		return;

	/* Get the list of modules from scsi_vhci.conf */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf is missing 'ddi-forceload'");
		return;
	}
	if (scsi_nfailover == 0) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf has empty 'ddi-forceload'");
		ddi_prop_free(module);
		return;
	}

	/* allocate failover table based on number of modules */
	scsi_failover_table = (struct scsi_failover *)
	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
	    KM_SLEEP);

	/* loop over modules specified in scsi_vhci.conf and open each module */
	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
		if (module[i] == NULL)
			continue;

		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
		if (sf->sf_mod == NULL) {
			/*
			 * A module returns EEXIST if other software is
			 * supporting the intended function: for example
			 * the scsi_vhci_f_sum_emc module returns EEXIST
			 * from _init if EMC powerpath software is installed.
			 */
			if (e != EEXIST)
				cmn_err(CE_WARN, "scsi_vhci: unable to open "
				    "module '%s', error %d", module[i], e);
			continue;
		}
		sf->sf_sfo = ddi_modsym(sf->sf_mod,
		    "scsi_vhci_failover_ops", &e);
		if (sf->sf_sfo == NULL) {
			cmn_err(CE_WARN, "scsi_vhci: "
			    "unable to import 'scsi_failover_ops' from '%s', "
			    "error %d", module[i], e);
			(void) ddi_modclose(sf->sf_mod);
			sf->sf_mod = NULL;
			continue;
		}

		/* register vid/pid of devices supported with mpapi */
		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
			vhci_mpapi_add_dev_prod(vhci, *dt);
		sf++;
	}

	/* verify that at least the "well-known" modules were there */
	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");
	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");

	/* call sfo_init for modules that need it */
	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
			sf->sf_sfo->sfo_init();
	}

	ddi_prop_free(module);
}

/*
 * unload all loaded scsi_failover_ops modules
 */
static void
vhci_failover_modclose()
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
			continue;
		(void) ddi_modclose(sf->sf_mod);
		sf->sf_mod = NULL;
		sf->sf_sfo = NULL;
	}

	if (scsi_failover_table && scsi_nfailover)
		kmem_free(scsi_failover_table,
		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
	scsi_failover_table = NULL;
	scsi_nfailover = 0;
}

/* ARGSUSED */
static int
vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
		mutex_exit(&vhci->vhci_mutex);
		vhci_log(CE_NOTE, vhci->vhci_dip,
		    "!vhci%d: Already open\n", getminor(*devp));
		return (EBUSY);
	}

	vhci->vhci_state |= VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);
	return (0);
}


/* ARGSUSED */
static int
vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	vhci->vhci_state &= ~VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);

	return (0);
}

/* ARGSUSED */
static int
vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
	cred_t *credp, int *rval)
{
	if (IS_DEVCTL(cmd)) {
		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
	} else if (cmd == MP_CMD) {
		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
	} else {
		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
	}
}

/*
 * attach the module
 */
static int
vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			rval = DDI_FAILURE;
	int			scsi_hba_attached = 0;
	int			vhci_attached = 0;
	int			mutex_initted = 0;
	int			instance;
	struct scsi_vhci	*vhci;
	scsi_hba_tran_t		*tran;
	char			cache_name_buf[64];
	char			*data;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet"
		    " implemented\n"));
		return (rval);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_attach: unknown ddi command\n"));
		return (rval);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "soft state alloc failed\n"));
		return (DDI_FAILURE);
	}

	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "bad soft state\n"));
		ddi_soft_state_free(vhci_softstate, instance);
		return (DDI_FAILURE);
	}

	/* Allocate packet cache */
	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
	    "vhci%d_cache", instance);

	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	ASSERT(tran != NULL);

	vhci->vhci_tran		= tran;
	vhci->vhci_dip		= dip;
	vhci->vhci_instance	= instance;

	tran->tran_hba_private	= vhci;
	tran->tran_tgt_init	= vhci_scsi_tgt_init;
	tran->tran_tgt_probe	= NULL;
	tran->tran_tgt_free	= vhci_scsi_tgt_free;

	tran->tran_start	= vhci_scsi_start;
	tran->tran_abort	= vhci_scsi_abort;
	tran->tran_reset	= vhci_scsi_reset;
	tran->tran_getcap	= vhci_scsi_getcap;
	tran->tran_setcap	= vhci_scsi_setcap;
	tran->tran_init_pkt	= vhci_scsi_init_pkt;
	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
	tran->tran_dmafree	= vhci_scsi_dmafree;
	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
	tran->tran_reset_notify = vhci_scsi_reset_notify;

	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
	tran->tran_get_name	= vhci_scsi_get_name;
	tran->tran_bus_reset	= NULL;
	tran->tran_quiesce	= NULL;
	tran->tran_unquiesce	= NULL;

	/*
	 * register event notification routines with scsa
	 */
	tran->tran_get_eventcookie = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_remove_eventcall = NULL;
	tran->tran_post_event	= NULL;

	tran->tran_bus_power	= vhci_scsi_bus_power;

	tran->tran_bus_config	= vhci_scsi_bus_config;
	tran->tran_bus_unconfig	= vhci_scsi_bus_unconfig;

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
	    != MDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "mdi_vhci_register failed\n"));
		goto attach_fail;
	}
	vhci_attached++;

	/*
	 * Attach this instance of the hba.
	 *
	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
	 * driver, it has nothing to do with DMA. However, when calling
	 * scsi_hba_attach_setup() we need to pass something valid in the
	 * dma attributes parameter. So we just use scsi_alloc_attr.
	 * SCSA itself seems to care only about the dma_attr_minxfer and
	 * dma_attr_burstsizes fields of the dma attributes structure,
	 * and it expects those fields to be non-zero.
	 */
	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
	    SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "hba attach failed\n"));
		goto attach_fail;
	}
	scsi_hba_attached++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    " ddi_create_minor_node failed\n"));
		goto attach_fail;
	}

	/*
	 * Set pm-want-child-notification property for
	 * power management of the phci and client
	 */
	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d fail to create pm-want-child-notification? prop",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto attach_fail;
	}

	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
	vhci->vhci_update_pathstates_taskq =
	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
	    MINCLSYSPRI, 1, 4, 0);
	ASSERT(vhci->vhci_taskq);
	ASSERT(vhci->vhci_update_pathstates_taskq);

	/*
	 * Set appropriate configuration flags based on options set in
	 * conf file.
	 */
	vhci->vhci_conf_flags = 0;
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
	    "auto-failback", &data) == DDI_SUCCESS) {
		if (strcmp(data, "enable") == 0)
			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
		ddi_prop_free(data);
	}

	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
		    "disabled through scsi_vhci.conf file.");

	/*
	 * Allocate an mpapi private structure
	 */
	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
	if (vhci_mpapi_init(vhci) != 0) {
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
		    "vhci_mpapi_init() failed"));
	}

	vhci_failover_modopen(vhci);		/* load failover modules */

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_attached)
		(void) mdi_vhci_unregister(dip, 0);

	if (scsi_hba_attached)
		(void) scsi_hba_detach(dip);

	if (vhci->vhci_tran)
		scsi_hba_tran_free(vhci->vhci_tran);

	if (mutex_initted) {
		mutex_destroy(&vhci->vhci_mutex);
	}

	ddi_soft_state_free(vhci_softstate, instance);
	return (DDI_FAILURE);
}


/*ARGSUSED*/
static int
vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int			instance = ddi_get_instance(dip);
	scsi_hba_tran_t		*tran;
	struct scsi_vhci	*vhci;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	vhci = TRAN2HBAPRIVATE(tran);
	if (!vhci) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not"
		    " yet implemented\n"));
		return (DDI_FAILURE);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_detach: unknown ddi command\n"));
		return (DDI_FAILURE);
	}

	(void) mdi_vhci_unregister(dip, 0);
	(void) scsi_hba_detach(dip);
	scsi_hba_tran_free(tran);

	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d unable to remove prop pm-want-child-notification?",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	if (vhci_restart_timeid != 0) {
		(void) untimeout(vhci_restart_timeid);
	}
	vhci_restart_timeid = 0;

	mutex_destroy(&vhci->vhci_mutex);
	vhci->vhci_dip = NULL;
	vhci->vhci_tran = NULL;
	taskq_destroy(vhci->vhci_taskq);
	taskq_destroy(vhci->vhci_update_pathstates_taskq);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(vhci_softstate, instance);

	vhci_failover_modclose();		/* unload failover modules */
	return (DDI_SUCCESS);
}

/*
 * vhci_getinfo()
 * Given the device number, return the devinfo pointer or the
 * instance number.
 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
 */

/*ARGSUSED*/
static int
vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct scsi_vhci	*vhcip;
	int			instance = MINOR2INST(getminor((dev_t)arg));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		vhcip = ddi_get_soft_state(vhci_softstate, instance);
		if (vhcip != NULL)
			*result = vhcip->vhci_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	char			*guid;
	scsi_vhci_lun_t		*vlun;
	struct scsi_vhci	*vhci;
	clock_t			from_ticks;
	mdi_pathinfo_t		*pip;
	int			rval;

	ASSERT(hba_dip != NULL);
	ASSERT(tgt_dip != NULL);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
		/*
		 * This must be the .conf node without GUID property.
		 * The node under fp already inserts a delay, so we
		 * just return from here. We rely on this delay to have
		 * all dips be posted to the ndi hotplug thread's newdev
		 * list. This is necessary for the deferred attach
		 * mechanism to work and opens() done soon after boot to
		 * succeed.
		 */
		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
		    "property failed"));
		return (DDI_NOT_WELL_FORMED);
	}

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		/*
		 * This must be a .conf node with the GUID property. We don't
		 * merge properties by ndi_merge_node() here because the
		 * devi_addr_buf of a .conf node is always "" according to the
		 * implementation of vhci_scsi_get_name_bus_addr().
		 */
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
	ASSERT(vhci != NULL);

	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
	    "!tgt_init: called for %s (instance %d)\n",
	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));

	vlun = vhci_lun_lookup(tgt_dip);

	mutex_enter(&vhci_global_mutex);

	from_ticks = ddi_get_lbolt();
	if (vhci_to_ticks == 0) {
		vhci_to_ticks = from_ticks +
		    drv_usectohz(vhci_init_wait_timeout);
	}

#if DEBUG
	if (vlun) {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
		    "from_ticks %lx to_ticks %lx",
		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
	} else {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : vlun not found "
		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
		    vhci_to_ticks));
	}
#endif

	rval = mdi_select_path(tgt_dip, NULL,
	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_rele_path(pip);
	}

	/*
	 * Wait for the following conditions :
	 *	1. no vlun available yet
	 *	2. no path established
	 *	3. timer did not expire
	 */
	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
	    (rval != MDI_SUCCESS)) {
		if (vlun && vlun->svl_not_supported) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
			    "vlun 0x%p lun guid %s not supported!",
			    (void *)vlun, guid));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
			vhci_first_time = 1;
		}
		if (vhci_first_time == 1) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
			    "no wait for %s. from_tick %lx, to_tick %lx",
			    guid, from_ticks, vhci_to_ticks));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}

		if (cv_timedwait(&vhci_cv,
		    &vhci_global_mutex, vhci_to_ticks) == -1) {
			/* Timed out */
#ifdef DEBUG
			if (vlun == NULL) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: no vlun for %s!", guid));
			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path count is "
				    "zero for %s!", guid));
			} else {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path not "
				    "available yet for %s!", guid));
			}
#endif /* DEBUG */
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		vlun = vhci_lun_lookup(tgt_dip);
		rval = mdi_select_path(tgt_dip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    NULL, &pip);
		if (rval == MDI_SUCCESS) {
			mdi_rele_path(pip);
		}
		from_ticks = ddi_get_lbolt();
	}
	mutex_exit(&vhci_global_mutex);

	ASSERT(vlun != NULL);
	ddi_prop_free(guid);

	scsi_device_hba_private_set(sd, vlun);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
}

/*
 * a PGR register command has started; copy the info we need
 */
int
vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
{
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	void			*addr;

	if (!vpkt->vpkt_tgt_init_bp)
		return (TRAN_BADPKT);

	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
	if (addr == NULL)
		return (TRAN_BUSY);

	mutex_enter(&vlun->svl_mutex);

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");

	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
	    (2 * MHIOC_RESV_KEY_SIZE*sizeof (char)));
	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");

	vlun->svl_time = pkt->pkt_time;
	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
	vlun->svl_first_path = vpkt->vpkt_path;
	mutex_exit(&vlun->svl_mutex);
	return (0);
}

/*
 * Function name : vhci_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
 *					  or other fatal failure
 *					  preventing packet transportation
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to phci
 *					  (or is held in the waitQ)
 * Description	 : Implements SCSA's tran_start() entry point for
 *		   packet transport
 *
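 * A minimal sketch of how a SCSA caller reacts to these return values
 * (illustrative only; scsi_transport() resolves to this tran_start):
 *
 *	rval = scsi_transport(pkt);
 *	if (rval == TRAN_BUSY)
 *		requeue the pkt and retry later;
 *	else if (rval != TRAN_ACCEPT)
 *		fail the pkt;
 *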
 */
static int
vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	int			rval = TRAN_ACCEPT;
	int			instance, held;
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	int			flags = 0;
	scsi_vhci_priv_t	*svp;
	dev_info_t 		*cdip;
	client_lb_t		lbp;
	int			restore_lbp = 0;
	/* set if pkt is SCSI-II RESERVE cmd */
	int			pkt_reserve_cmd = 0;
	int			reserve_failed = 0;

	ASSERT(vhci != NULL);
	ASSERT(vpkt != NULL);
	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
	cdip = ADDR2DIP(ap);

	/*
	 * Block IOs if LUN is held or QUIESCED for IOs.
	 */
	if ((VHCI_LUN_IS_HELD(vlun)) ||
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		return (TRAN_BUSY);
	}

	/*
	 * vhci_lun needs to be quiesced before SCSI-II RESERVE command
	 * can be issued.  This may require a cv_timedwait, which is
	 * dangerous to perform in an interrupt context.  So if this
	 * is a RESERVE command a taskq is dispatched to service it.
	 * That taskq will again call vhci_scsi_start, but by then we
	 * are sure it is not in an interrupt context.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
			if (taskq_dispatch(vhci->vhci_taskq,
			    vhci_dispatch_scsi_start, (void *) vpkt,
			    KM_NOSLEEP)) {
				return (TRAN_ACCEPT);
			} else {
				return (TRAN_BUSY);
			}
		}

		/*
		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
		 * get serviced for a lun.
		 */
		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
		if (!held) {
			return (TRAN_BUSY);
		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
		    VLUN_QUIESCED_FLG) {
			VHCI_RELEASE_LUN(vlun);
			return (TRAN_BUSY);
		}

		/*
		 * To ensure that no IOs occur for this LUN for the duration
		 * of this pkt set the VLUN_QUIESCED_FLG.
		 * In case this routine needs to exit on error make sure that
		 * this flag is cleared.
		 */
		vlun->svl_flags |= VLUN_QUIESCED_FLG;
		pkt_reserve_cmd = 1;

		/*
		 * if this is a SCSI-II RESERVE command, set load balancing
		 * policy to be ALTERNATE PATH to ensure that all subsequent
		 * IOs are routed on the same path.  This is because if commands
		 * are routed across multiple paths then IOs on paths other than
		 * the one on which the RESERVE was executed will get a
		 * RESERVATION CONFLICT
		 */
		lbp = mdi_get_lb_policy(cdip);
		if (lbp != LOAD_BALANCE_NONE) {
			if (vhci_quiesce_lun(vlun) != 1) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			vlun->svl_lb_policy_save = lbp;
			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
			    MDI_SUCCESS) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			restore_lbp = 1;
		}
		/*
		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
		 * To narrow this window where a reserve command may be sent
		 * down an inactive path the path states first need to be
		 * updated. Before calling vhci_update_pathstates reset
		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
		 * for this lun.  This shall prevent an unnecessary reset
		 * from being sent out.
		 */
		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		vhci_update_pathstates((void *)vlun);
	}

	instance = ddi_get_instance(vhci->vhci_dip);

	/*
	 * If the command is PRIN with action of zero, then the cmd
	 * is reading PR keys which requires filtering on completion.
	 * Data cache sync must be guaranteed.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
	    (vpkt->vpkt_org_vpkt == NULL)) {
		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
	}

	/*
	 * Do not defer bind for PKT_DMA_PARTIAL
	 */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {

		/* This is a non pkt_dma_partial case */
		if ((rval = vhci_bind_transport(
		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
		    != TRAN_ACCEPT) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci%d %x: failed to bind transport: "
			    "vlun 0x%p pkt_reserved %x restore_lbp %x, "
			    "lbp %x", instance, rval, (void *)vlun,
			    pkt_reserve_cmd, restore_lbp, lbp));
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
			if (pkt_reserve_cmd)
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			return (rval);
		}
		VHCI_DEBUG(8, (CE_NOTE, NULL,
		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
	}
	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	ASSERT(vpkt->vpkt_path != NULL);

	/*
	 * This is the chance to adjust the pHCI's pkt and other information
	 * from target driver's pkt.
	 */
	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
	    (void *)vpkt));
	vhci_update_pHCI_pkt(vpkt, pkt);

	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
			    "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
			    (void *)vlun, (void *)vpkt->vpkt_path,
			    (void *)vlun->svl_resrv_pip,
			    mdi_get_lb_policy(cdip)));
			reserve_failed = 1;
		}
	}

	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
	if (svp == NULL || reserve_failed) {
		if (pkt_reserve_cmd) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind returned null svp vlun 0x%p",
			    (void *)vlun));
			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
		}
pkt_cleanup:
		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
			vpkt->vpkt_hba_pkt = NULL;
			if (vpkt->vpkt_path) {
				mdi_rele_path(vpkt->vpkt_path);
				vpkt->vpkt_path = NULL;
			}
		}
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
			sema_v(&vlun->svl_pgr_sema);
		}
		return (TRAN_BUSY);
	}

	VHCI_INCR_PATH_CMDCOUNT(svp);

	/*
	 * Ensure that no other IOs raced ahead, while a RESERVE cmd was
	 * QUIESCING the same lun.
	 */
	if ((!pkt_reserve_cmd) &&
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		VHCI_DECR_PATH_CMDCOUNT(svp);
		goto pkt_cleanup;
	}

	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
		/*
		 * currently this thread only handles running PGR
		 * commands, so don't bother creating it unless
		 * something interesting is going to happen (like
		 * either a PGR out, or a PGR in with enough space
		 * to hold the keys that are getting returned)
		 */
		mutex_enter(&vlun->svl_mutex);
		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
			    1, MINCLSYSPRI, 1, 4, 0);
			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
		}
		mutex_exit(&vlun->svl_mutex);
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
			if ((rval = vhci_pgr_register_start(vlun, pkt)) != 0) {
				/* an error */
				sema_v(&vlun->svl_pgr_sema);
				return (rval);
			}
		}
	}

	/*
	 * SCSI-II RESERVE cmd is not expected in polled mode.
	 * If this changes it needs to be handled for the polled scenario.
	 */
	flags = vpkt->vpkt_hba_pkt->pkt_flags;

	/*
	 * Set the path_instance *before* sending the scsi_pkt down the path
	 * to mpxio's pHCI so that additional path abstractions at a pHCI
	 * level (like maybe iSCSI at some point in the future) can update
	 * the path_instance.
	 */
	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
		vpkt->vpkt_hba_pkt->pkt_path_instance =
		    mdi_pi_get_path_instance(vpkt->vpkt_path);

	rval = scsi_transport(vpkt->vpkt_hba_pkt);
	if (rval == TRAN_ACCEPT) {
		if (flags & FLAG_NOINTR) {
			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;

			ASSERT(tpkt != NULL);
			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
			tpkt->pkt_resid = pkt->pkt_resid;
			tpkt->pkt_state = pkt->pkt_state;
			tpkt->pkt_statistics = pkt->pkt_statistics;
			tpkt->pkt_reason = pkt->pkt_reason;

			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
				    vpkt->vpkt_tgt_init_scblen);
			}

			VHCI_DECR_PATH_CMDCOUNT(svp);
			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
				vpkt->vpkt_hba_pkt = NULL;
				if (vpkt->vpkt_path) {
					mdi_rele_path(vpkt->vpkt_path);
					vpkt->vpkt_path = NULL;
				}
			}
			/*
			 * This path will not automatically retry pkts
			 * internally, therefore, vpkt_org_vpkt should
			 * never be set.
			 */
			ASSERT(vpkt->vpkt_org_vpkt == NULL);
			scsi_hba_pkt_comp(tpkt);
		}
		return (rval);
	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (pkt_reserve_cmd) {
		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
		    "!vhci_scsi_start: reserve failed vlun 0x%p",
		    (void *)vlun));
		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
		if (restore_lbp)
			(void) mdi_set_lb_policy(cdip, lbp);
	}

	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	VHCI_DECR_PATH_CMDCOUNT(svp);

	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
		vpkt->vpkt_hba_pkt = NULL;
		if (vpkt->vpkt_path) {
			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
			mdi_rele_path(vpkt->vpkt_path);
			vpkt->vpkt_path = NULL;
		}
	}
	return (TRAN_BUSY);
}

/*
 * Function name : vhci_scsi_reset()
 *
 * Return Values : 0 - reset failed
 *		   1 - reset succeeded
 */

/* ARGSUSED */
static int
vhci_scsi_reset(struct scsi_address *ap, int level)
{
	int rval = 0;

	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
		return (vhci_scsi_reset_target(ap, level, TRUE));
	} else if (level == RESET_ALL) {
		return (vhci_scsi_reset_bus(ap));
	}

	return (rval);
}

/*
 * vhci_recovery_reset:
 *	Issues reset to the device
 * Input:
 *	vlun - vhci lun pointer of the device
 *	ap - address of the device
 *	select_path:
 *		If select_path is FALSE, then the address specified in ap is
 *		the path on which reset will be issued.
 *		If select_path is TRUE, then path is obtained by calling
 *		mdi_select_path.
 *
 *	recovery_depth:
 *		Caller can specify the level of reset.
 *		VHCI_DEPTH_LUN -
 *			Issues LUN RESET if device supports lun reset.
 *		VHCI_DEPTH_TARGET -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET
 *		VHCI_DEPTH_ALL -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET.
 *			If TARGET RESET does not succeed, issues Bus Reset.
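 *
 * Example (hypothetical caller): to request a LUN reset that may
 * escalate to a target reset on a framework-selected path, use
 *
 *	(void) vhci_recovery_reset(vlun, ap, TRUE, VHCI_DEPTH_TARGET);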
 */

static int
vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
	uint8_t select_path, uint8_t recovery_depth)
{
	int	ret = 0;

	ASSERT(ap != NULL);

	if (vlun && vlun->svl_support_lun_reset == 1) {
		ret = vhci_scsi_reset_target(ap, RESET_LUN,
		    select_path);
	}

	recovery_depth--;

	if ((ret == 0) && recovery_depth) {
		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
		    select_path);
		recovery_depth--;
	}

	if ((ret == 0) && recovery_depth) {
		(void) scsi_reset(ap, RESET_ALL);
	}

	return (ret);
}

/*
 * Note: The scsi_address passed to this routine could be the scsi_address
 * for the virtual device or the physical device. No assumptions should be
 * made in this routine about the contents of the ap structure.
 * Further, note that the child dip would be the dip of the ssd node regardless
 * of the scsi_address passed in.
 */
static int
vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
{
	dev_info_t		*vdip, *cdip;
	mdi_pathinfo_t		*pip = NULL;
	mdi_pathinfo_t		*npip = NULL;
	int			rval = -1;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	struct scsi_vhci	*vhci = NULL;

	if (select_path != TRUE) {
		ASSERT(ap != NULL);
		if (level == RESET_LUN) {
			hba = ap->a_hba_tran;
			ASSERT(hba != NULL);
			return (hba->tran_reset(ap, RESET_LUN));
		}
		return (scsi_reset(ap, level));
	}

	cdip = ADDR2DIP(ap);
	ASSERT(cdip != NULL);
	vdip = ddi_get_parent(cdip);
	ASSERT(vdip != NULL);
	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "Unable to get a path, dip 0x%p", (void *)cdip));
		return (0);
	}
again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (0);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (0);
	}

	pap = &svp->svp_psd->sd_address;
	hba = pap->a_hba_tran;

	ASSERT(pap != NULL);
	ASSERT(hba != NULL);

	if (hba->tran_reset != NULL) {
		if (hba->tran_reset(pap, level) == 0) {
			vhci_log(CE_WARN, vdip, "!%s%d: "
			    "path %s, reset %d failed",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    mdi_pi_spathname(pip), level);

			/*
			 * Select next path and issue the reset, repeat
			 * until all paths are exhausted
			 */
			sps = mdi_select_path(cdip, NULL,
			    MDI_SELECT_ONLINE_PATH, pip, &npip);
			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
				mdi_rele_path(pip);
				return (0);
			}
			mdi_rele_path(pip);
			pip = npip;
			goto again;
		}
		mdi_rele_path(pip);
		mutex_enter(&vhci->vhci_mutex);
		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
		    &vhci->vhci_reset_notify_listf);
		mutex_exit(&vhci->vhci_mutex);
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
		    "reset %d sent down pip:%p for cdip:%p\n", level,
		    (void *)pip, (void *)cdip));
		return (1);
	}
	mdi_rele_path(pip);
	return (0);
}


/* ARGSUSED */
static int
vhci_scsi_reset_bus(struct scsi_address *ap)
{
	return (1);
}


/*
 * called by vhci_getcap and vhci_setcap to get and set (respectively)
 * SCSI capabilities
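 *
 * Illustrative (hypothetical) target-driver calls that end up here:
 *
 *	if (scsi_ifgetcap(ap, "lun-reset", 1) == 1)
 *		lun_reset_supported = 1;
 *	(void) scsi_ifsetcap(ap, "sector-size", 512, 1);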
1621  */
1622 /* ARGSUSED */
1623 static int
1624 vhci_commoncap(struct scsi_address *ap, char *cap,
1625     int val, int tgtonly, int doset)
1626 {
1627 	struct scsi_vhci		*vhci = ADDR2VHCI(ap);
1628 	struct scsi_vhci_lun		*vlun = ADDR2VLUN(ap);
1629 	int			cidx;
1630 	int			rval = 0;
1631 
1632 	if (cap == (char *)0) {
1633 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1634 		    "!vhci_commoncap: invalid arg"));
1635 		return (rval);
1636 	}
1637 
1638 	if (vlun == NULL) {
1639 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1640 		    "!vhci_commoncap: vlun is null"));
1641 		return (rval);
1642 	}
1643 
1644 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
1645 		return (UNDEFINED);
1646 	}
1647 
1648 	/*
1649 	 * Process setcap request.
1650 	 */
1651 	if (doset) {
1652 		/*
1653 		 * At present, we can only set binary (0/1) values
1654 		 */
1655 		switch (cidx) {
1656 		case SCSI_CAP_ARQ:
1657 			if (val == 0) {
1658 				rval = 0;
1659 			} else {
1660 				rval = 1;
1661 			}
1662 			break;
1663 
1664 		case SCSI_CAP_LUN_RESET:
1665 			if (tgtonly == 0) {
1666 				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1667 				    "scsi_vhci_setcap: "
1668 				    "Returning error since whom = 0"));
1669 				rval = -1;
1670 				break;
1671 			}
1672 			/*
1673 			 * Set the capability accordingly.
1674 			 */
1675 			mutex_enter(&vlun->svl_mutex);
1676 			vlun->svl_support_lun_reset = val;
1677 			rval = val;
1678 			mutex_exit(&vlun->svl_mutex);
1679 			break;
1680 
1681 		case SCSI_CAP_SECTOR_SIZE:
1682 			mutex_enter(&vlun->svl_mutex);
1683 			vlun->svl_sector_size = val;
1684 			vlun->svl_setcap_done = 1;
1685 			mutex_exit(&vlun->svl_mutex);
1686 			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);
1687 
1688 			/* Always return success */
1689 			rval = 1;
1690 			break;
1691 
1692 		default:
1693 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1694 			    "!vhci_setcap: unsupported %d", cidx));
1695 			rval = UNDEFINED;
1696 			break;
1697 		}
1698 
1699 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1700 		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
1701 		    "0x%x/0x%x/0x%x/%d\n",
1702 		    cap, val, tgtonly, doset, rval));
1703 
1704 	} else {
1705 		/*
1706 		 * Process getcap request.
1707 		 */
1708 		switch (cidx) {
1709 		case SCSI_CAP_DMA_MAX:
1710 			/*
1711 			 * For X86 this capability is caught in scsi_ifgetcap().
1712 			 * XXX Should this be getting the value from the pHCI?
1713 			 */
1714 			rval = (int)VHCI_DMA_MAX_XFER_CAP;
1715 			break;
1716 
1717 		case SCSI_CAP_INITIATOR_ID:
1718 			rval = 0x00;
1719 			break;
1720 
1721 		case SCSI_CAP_ARQ:
1722 		case SCSI_CAP_RESET_NOTIFICATION:
1723 		case SCSI_CAP_TAGGED_QING:
1724 			rval = 1;
1725 			break;
1726 
1727 		case SCSI_CAP_SCSI_VERSION:
1728 			rval = 3;
1729 			break;
1730 
1731 		case SCSI_CAP_INTERCONNECT_TYPE:
1732 			rval = INTERCONNECT_FABRIC;
1733 			break;
1734 
1735 		case SCSI_CAP_LUN_RESET:
1736 			/*
1737 			 * scsi_vhci will always return success for LUN reset.
1738 			 * When a request to do a LUN reset comes in through
1739 			 * the scsi_reset entry point, an attempt will be
1740 			 * made to do the reset through all of the possible
1741 			 * paths.
1742 			 */
1743 			mutex_enter(&vlun->svl_mutex);
1744 			rval = vlun->svl_support_lun_reset;
1745 			mutex_exit(&vlun->svl_mutex);
1746 			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
1747 			    "scsi_vhci_getcap:"
1748 			    "Getting the Lun reset capability %d", rval));
1749 			break;
1750 
1751 		case SCSI_CAP_SECTOR_SIZE:
1752 			mutex_enter(&vlun->svl_mutex);
1753 			rval = vlun->svl_sector_size;
1754 			mutex_exit(&vlun->svl_mutex);
1755 			break;
1756 
1757 		case SCSI_CAP_CDB_LEN:
1758 			rval = VHCI_SCSI_CDB_SIZE;
1759 			break;
1760 
1761 		case SCSI_CAP_DMA_MAX_ARCH:
1762 			/*
1763 			 * For X86 this capability is caught in scsi_ifgetcap().
1764 			 * XXX Should this be getting the value from the pHCI?
1765 			 */
1766 			rval = 0;
1767 			break;
1768 
1769 		default:
1770 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1771 			    "!vhci_getcap: unsupported %d", cidx));
1772 			rval = UNDEFINED;
1773 			break;
1774 		}
1775 
1776 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1777 		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
1778 		    "0x%x/0x%x/0x%x/%d\n",
1779 		    cap, val, tgtonly, doset, rval));
1780 	}
1781 	return (rval);
1782 }
1783 
1784 
1785 /*
1786  * Function name : vhci_scsi_getcap()
1787  *
1788  */
1789 static int
1790 vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
1791 {
1792 	return (vhci_commoncap(ap, cap, 0, whom, 0));
1793 }
1794 
1795 static int
1796 vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1797 {
1798 	return (vhci_commoncap(ap, cap, value, whom, 1));
1799 }
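
/*
 * For example, a target driver bound to a scsi_vhci client node might
 * query the LUN reset capability via
 *	(void) scsi_ifgetcap(&sd->sd_address, "lun-reset", 1);
 * which resolves to vhci_scsi_getcap() through the vHCI's tran_getcap
 * entry point.
 */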
1800 
1801 /*
1802  * Function name : vhci_scsi_abort()
1803  */
1804 /* ARGSUSED */
1805 static int
1806 vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1807 {
1808 	return (0);
1809 }
1810 
1811 /*
1812  * Function name : vhci_scsi_init_pkt
1813  *
1814  * Return Values : pointer to scsi_pkt, or NULL
1815  */
1816 /* ARGSUSED */
1817 static struct scsi_pkt *
1818 vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1819 	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1820 	int flags, int (*callback)(caddr_t), caddr_t arg)
1821 {
1822 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
1823 	struct vhci_pkt		*vpkt;
1824 	int			rval;
1825 	int			newpkt = 0;
1826 	struct scsi_pkt		*pktp;
1827 
1829 	if (pkt == NULL) {
1830 		if (cmdlen > VHCI_SCSI_CDB_SIZE) {
1831 			if ((cmdlen != VHCI_SCSI_OSD_CDB_SIZE) ||
1832 			    ((flags & VHCI_SCSI_OSD_PKT_FLAGS) !=
1833 			    VHCI_SCSI_OSD_PKT_FLAGS)) {
1834 				VHCI_DEBUG(1, (CE_NOTE, NULL,
1835 				    "!init pkt: cdb size not supported\n"));
1836 				return (NULL);
1837 			}
1838 		}
1839 
1840 		pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
1841 		    ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
1842 		    arg);
1843 
1844 		if (pktp == NULL) {
1845 			return (NULL);
1846 		}
1847 
1848 		/* Get the vhci's private structure */
1849 		vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
1850 		ASSERT(vpkt);
1851 
1852 		/* Save the target driver's packet */
1853 		vpkt->vpkt_tgt_pkt = pktp;
1854 
1855 		/*
1856 		 * Save pkt_tgt_init_pkt fields if deferred binding
1857 		 * is needed or for other purposes.
1858 		 */
1859 		vpkt->vpkt_tgt_init_pkt_flags = flags;
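		/* a NULL_FUNC callback means the caller must not block */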
1860 		vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0;
1861 		vpkt->vpkt_state = VHCI_PKT_IDLE;
1862 		vpkt->vpkt_tgt_init_cdblen = cmdlen;
1863 		vpkt->vpkt_tgt_init_scblen = statuslen;
1864 		newpkt = 1;
1865 	} else { /* pkt not NULL */
1866 		vpkt = pkt->pkt_ha_private;
1867 	}
1868 
1869 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt "
1870 	    "vpkt %p flags %x\n", (void *)vpkt, flags));
1871 
1872 	/* Clear any stale error flags */
1873 	if (bp) {
1874 		bioerror(bp, 0);
1875 	}
1876 
1877 	vpkt->vpkt_tgt_init_bp = bp;
1878 
1879 	if (flags & PKT_DMA_PARTIAL) {
1880 
1881 		/*
1882 		 * Immediate binding is needed.
1883 		 * The target driver may not set this flag in the next
1884 		 * invocation, so vhci has to remember that it was set during
1885 		 * the first invocation of vhci_scsi_init_pkt.
1886 		 */
1887 		vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL;
1888 	}
1889 
1890 	if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) {
1891 
1892 		/*
1893 		 * Re-initialize some of the target driver packet state
1894 		 * information.
1895 		 */
1896 		vpkt->vpkt_tgt_pkt->pkt_state = 0;
1897 		vpkt->vpkt_tgt_pkt->pkt_statistics = 0;
1898 		vpkt->vpkt_tgt_pkt->pkt_reason = 0;
1899 
1900 		/*
1901 		 * Bind a vpkt->vpkt_path for this I/O at init time.  If an
1902 		 * I/O error happens later, the target driver will clear this
1903 		 * vpkt->vpkt_path binding before re-initializing the I/O.
1904 		 */
1905 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1906 		    "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
1907 		    (void *)vpkt, newpkt));
1908 		if (pkt && vpkt->vpkt_hba_pkt) {
1909 			VHCI_DEBUG(4, (CE_NOTE, NULL,
1910 			    "v_s_i_p calling update_pHCI_pkt resid %ld\n",
1911 			    pkt->pkt_resid));
1912 			vhci_update_pHCI_pkt(vpkt, pkt);
1913 		}
1914 		if (callback == SLEEP_FUNC) {
1915 			rval = vhci_bind_transport(
1916 			    ap, vpkt, flags, callback);
1917 		} else {
1918 			rval = vhci_bind_transport(
1919 			    ap, vpkt, flags, NULL_FUNC);
1920 		}
1921 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1922 		    "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
1923 		    (void *)vpkt, rval));
1924 		if (bp) {
1925 			if (rval == TRAN_FATAL_ERROR) {
1926 				/*
1927 				 * No paths available. Could not bind
1928 				 * any pHCI. Setting EFAULT as a way
1929 				 * to indicate no DMA is mapped.
1930 				 */
1931 				bioerror(bp, EFAULT);
1932 			} else {
1933 				/*
1934 				 * Do not indicate any pHCI errors to
1935 				 * target driver otherwise.
1936 				 */
1937 				bioerror(bp, 0);
1938 			}
1939 		}
1940 		if (rval != TRAN_ACCEPT) {
1941 			VHCI_DEBUG(8, (CE_NOTE, NULL,
1942 			    "vhci_scsi_init_pkt: "
1943 			    "v_b_t failed 0x%p newpkt %x\n",
1944 			    (void *)vpkt, newpkt));
1945 			if (newpkt) {
1946 				scsi_hba_pkt_free(ap,
1947 				    vpkt->vpkt_tgt_pkt);
1948 			}
1949 			return (NULL);
1950 		}
1951 		ASSERT(vpkt->vpkt_hba_pkt != NULL);
1952 		ASSERT(vpkt->vpkt_path != NULL);
1953 
1954 		/* Update the resid for the target driver */
1955 		vpkt->vpkt_tgt_pkt->pkt_resid =
1956 		    vpkt->vpkt_hba_pkt->pkt_resid;
1957 	}
1958 
1959 	return (vpkt->vpkt_tgt_pkt);
1960 }
1961 
1962 /*
1963  * Function name : vhci_scsi_destroy_pkt
1964  *
1965  * Return Values : none
1966  */
1967 static void
1968 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1969 {
1970 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
1971 
1972 	VHCI_DEBUG(8, (CE_NOTE, NULL,
1973 	    "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));
1974 
1975 	vpkt->vpkt_tgt_init_pkt_flags = 0;
1976 	if (vpkt->vpkt_hba_pkt) {
1977 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1978 		vpkt->vpkt_hba_pkt = NULL;
1979 	}
1980 	if (vpkt->vpkt_path) {
1981 		mdi_rele_path(vpkt->vpkt_path);
1982 		vpkt->vpkt_path = NULL;
1983 	}
1984 
1985 	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
1986 	scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
1987 }
1988 
1989 /*
1990  * Function name : vhci_scsi_dmafree()
1991  *
1992  * Return Values : none
1993  */
1994 /*ARGSUSED*/
1995 static void
1996 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1997 {
1998 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
1999 
2000 	VHCI_DEBUG(6, (CE_NOTE, NULL,
2001 	    "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));
2002 
2003 	ASSERT(vpkt != NULL);
2004 	if (vpkt->vpkt_hba_pkt) {
2005 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2006 		vpkt->vpkt_hba_pkt = NULL;
2007 	}
2008 	if (vpkt->vpkt_path) {
2009 		mdi_rele_path(vpkt->vpkt_path);
2010 		vpkt->vpkt_path = NULL;
2011 	}
2012 }
2013 
2014 /*
2015  * Function name : vhci_scsi_sync_pkt()
2016  *
2017  * Return Values : none
2018  */
2019 /*ARGSUSED*/
2020 static void
2021 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2022 {
2023 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2024 
2025 	ASSERT(vpkt != NULL);
2026 	if (vpkt->vpkt_hba_pkt) {
2027 		scsi_sync_pkt(vpkt->vpkt_hba_pkt);
2028 	}
2029 }
2030 
2031 /*
2032  * routine for reset notification setup, to register or cancel.
2033  */
2034 static int
2035 vhci_scsi_reset_notify(struct scsi_address *ap, int flag,
2036     void (*callback)(caddr_t), caddr_t arg)
2037 {
2038 	struct scsi_vhci *vhci = ADDR2VHCI(ap);
2039 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
2040 	    &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf));
2041 }
2042 
2043 static int
2044 vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
2045     char *name, int len, int bus_addr)
2046 {
2047 	dev_info_t		*cdip;
2048 	char			*guid;
2049 	scsi_vhci_lun_t		*vlun;
2050 
2051 	ASSERT(sd != NULL);
2052 	ASSERT(name != NULL);
2053 
2054 	*name = 0;
2055 	cdip = sd->sd_dev;
2056 
2057 	ASSERT(cdip != NULL);
2058 
2059 	if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS)
2060 		return (1);
2061 
2062 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
2063 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS)
2064 		return (1);
2065 
2066 	/*
2067 	 * Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>".
2068 	 *	<guid>		bus_addr argument == 0
2069 	 *	<bus_addr>	bus_addr argument != 0
2070 	 * Since the <guid> is already provided in the unit-address, we just
2071 	 * report the failover module name in <bus_addr> to keep output short.
2072 	 */
2073 	vlun = ADDR2VLUN(&sd->sd_address);
2074 	if (bus_addr == 0) {
2075 		/* report the guid:  */
2076 		(void) snprintf(name, len, "g%s", guid);
2077 	} else if (vlun && vlun->svl_fops_name) {
2078 		/* report the name of the failover module */
2079 		(void) snprintf(name, len, "%s", vlun->svl_fops_name);
2080 	}
2081 
2082 	ddi_prop_free(guid);
2083 	return (1);
2084 }
2085 
2086 static int
2087 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
2088 {
2089 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 1));
2090 }
2091 
2092 static int
2093 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len)
2094 {
2095 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 0));
2096 }
2097 
2098 /*
2099  * Return a pointer to the guid part of the devnm.
2100  * devnm format is "nodename@busaddr", busaddr format is "gGUID".
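 * For example, given "ssd@g5000c50012345678" (a hypothetical GUID),
 * a pointer to "5000c50012345678" is returned.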
2101  */
2102 static char *
2103 vhci_devnm_to_guid(char *devnm)
2104 {
2105 	char *cp = devnm;
2106 
2107 	if (devnm == NULL)
2108 		return (NULL);
2109 
2110 	while (*cp != '\0' && *cp != '@')
2111 		cp++;
2112 	if (*cp == '@' && *(cp + 1) == 'g')
2113 		return (cp + 2);
2114 	return (NULL);
2115 }
2116 
2117 static int
2118 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags,
2119     int (*func)(caddr_t))
2120 {
2121 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
2122 	dev_info_t		*cdip = ADDR2DIP(ap);
2123 	mdi_pathinfo_t		*pip = NULL;
2124 	mdi_pathinfo_t		*npip = NULL;
2125 	scsi_vhci_priv_t	*svp = NULL;
2126 	struct scsi_device	*psd = NULL;
2127 	struct scsi_address	*address = NULL;
2128 	struct scsi_pkt		*pkt = NULL;
2129 	int			rval = -1;
2130 	int			pgr_sema_held = 0;
2131 	int			held;
2132 	int			mps_flag = MDI_SELECT_ONLINE_PATH;
2133 	struct scsi_vhci_lun	*vlun;
2134 	time_t			tnow;
2135 	int			path_instance = 0;
2136 
2137 	vlun = ADDR2VLUN(ap);
2138 	ASSERT(vlun != NULL);
2139 
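	/*
	 * A PROUT REGISTER or REGISTER AND IGNORE EXISTING KEY request
	 * must later be replicated across every path, so it is
	 * serialized per-LUN on svl_pgr_sema before a path is selected.
	 */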
2140 	if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) &&
2141 	    (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2142 	    VHCI_PROUT_REGISTER) ||
2143 	    ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2144 	    VHCI_PROUT_R_AND_IGNORE))) {
2145 		if (!sema_tryp(&vlun->svl_pgr_sema))
2146 			return (TRAN_BUSY);
2147 		pgr_sema_held = 1;
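		/*
		 * If an earlier PROUT recorded the first path it was sent
		 * down, scan the online/standby paths to confirm that the
		 * path is still present so it can be reused;
		 * vhci_run_cmd() later skips it when replicating the
		 * registration.
		 */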
2148 		if (vlun->svl_first_path != NULL) {
2149 			rval = mdi_select_path(cdip, NULL,
2150 			    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2151 			    NULL, &pip);
2152 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2153 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2154 				    "vhci_bind_transport: path select fail\n"));
2155 			} else {
2156 				npip = pip;
2157 				do {
2158 					if (npip == vlun->svl_first_path) {
2159 						VHCI_DEBUG(4, (CE_NOTE, NULL,
2160 						    "vhci_bind_transport: "
2161 						    "valid first path 0x%p\n",
2162 						    (void *)
2163 						    vlun->svl_first_path));
2164 						pip = vlun->svl_first_path;
2165 						goto bind_path;
2166 					}
2167 					pip = npip;
2168 					rval = mdi_select_path(cdip, NULL,
2169 					    MDI_SELECT_ONLINE_PATH |
2170 					    MDI_SELECT_STANDBY_PATH,
2171 					    pip, &npip);
2172 					mdi_rele_path(pip);
2173 				} while ((rval == MDI_SUCCESS) &&
2174 				    (npip != NULL));
2175 			}
2176 		}
2177 
2178 		if (vlun->svl_first_path) {
2179 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2180 			    "vhci_bind_transport: invalid first path 0x%p\n",
2181 			    (void *)vlun->svl_first_path));
2182 			vlun->svl_first_path = NULL;
2183 		}
2184 	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
2185 		if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
2186 			if (!sema_tryp(&vlun->svl_pgr_sema))
2187 				return (TRAN_BUSY);
2188 		}
2189 		pgr_sema_held = 1;
2190 	}
2191 
2192 	/*
2193 	 * If the path is already bound for the PKT_PARTIAL_DMA case,
2194 	 * try to use the same path.
2195 	 */
2196 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2197 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2198 		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2199 		    "vpkt 0x%p, path 0x%p\n",
2200 		    (void *)vpkt, (void *)vpkt->vpkt_path));
2201 		pip = vpkt->vpkt_path;
2202 		goto bind_path;
2203 	}
2204 
2205 	/*
2206 	 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set
2207 	 * indicates that mdi_select_path should be called to select a
2208 	 * specific instance.
2209 	 *
2210 	 * NB: Condition pkt_path_instance reference on proper allocation.
2211 	 */
2212 	if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) &&
2213 	    scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) {
2214 		path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
2215 	}
2216 
2217 	/*
2218 	 * If reservation is active bind the transport directly to the pip
2219 	 * with the reservation.
2220 	 */
2221 	if (vpkt->vpkt_hba_pkt == NULL) {
2222 		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2223 			if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2224 				pip = vlun->svl_resrv_pip;
2225 				mdi_hold_path(pip);
2226 				vlun->svl_waiting_for_activepath = 0;
2227 				rval = MDI_SUCCESS;
2228 				goto bind_path;
2229 			} else {
2230 				if (pgr_sema_held) {
2231 					sema_v(&vlun->svl_pgr_sema);
2232 				}
2233 				return (TRAN_BUSY);
2234 			}
2235 		}
2236 try_again:
2237 		rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2238 		    path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
2239 		    (void *)(intptr_t)path_instance, &pip);
2240 		if (rval == MDI_BUSY) {
2241 			if (pgr_sema_held) {
2242 				sema_v(&vlun->svl_pgr_sema);
2243 			}
2244 			return (TRAN_BUSY);
2245 		} else if (rval == MDI_DEVI_ONLINING) {
2246 			/*
2247 			 * If we are here, we are in the midst of an
2248 			 * attach/probe of the client device.  We attempt
2249 			 * to bind to an ONLINE path if available; else it
2250 			 * is OK to bind to a STANDBY path (instead of
2251 			 * triggering a failover) because the I/O associated
2252 			 * with attach/probe (e.g. INQUIRY, block 0 read) is
2253 			 * completed by targets even on passive paths.
2254 			 * If no ONLINE path is available, it is important
2255 			 * to set svl_waiting_for_activepath for two
2256 			 * reasons: (1) to avoid sense analysis in the
2257 			 * "external failure detection" codepath in
2258 			 * vhci_intr() -- failure to do so would result in
2259 			 * an infinite loop (unless an ONLINE path becomes
2260 			 * available at some point); (2) to avoid an
2261 			 * unnecessary failover (see "---Waiting For Active
2262 			 * Path---" comment below).
2263 			 */
2264 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2265 			    "state\n", (void *)cdip));
2266 			pip = NULL;
2267 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2268 			    mps_flag, NULL, &pip);
2269 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2270 				if (vlun->svl_waiting_for_activepath == 0) {
2271 					vlun->svl_waiting_for_activepath = 1;
2272 					vlun->svl_wfa_time = ddi_get_time();
2273 				}
2274 				mps_flag |= MDI_SELECT_STANDBY_PATH;
2275 				rval = mdi_select_path(cdip,
2276 				    vpkt->vpkt_tgt_init_bp,
2277 				    mps_flag, NULL, &pip);
2278 				if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2279 					if (pgr_sema_held) {
2280 						sema_v(&vlun->svl_pgr_sema);
2281 					}
2282 					return (TRAN_FATAL_ERROR);
2283 				}
2284 				goto bind_path;
2285 			}
2286 		} else if ((rval == MDI_FAILURE) ||
2287 		    ((rval == MDI_NOPATH) && (path_instance))) {
2288 			if (pgr_sema_held) {
2289 				sema_v(&vlun->svl_pgr_sema);
2290 			}
2291 			return (TRAN_FATAL_ERROR);
2292 		}
2293 
2294 		if ((pip == NULL) || (rval == MDI_NOPATH)) {
2295 			while (vlun->svl_waiting_for_activepath) {
2296 				/*
2297 				 * ---Waiting For Active Path---
2298 				 * This device was discovered across a
2299 				 * passive path; let's wait a little
2300 				 * while in the hope that an active
2301 				 * path shows up, obviating the need
2302 				 * for a failover.
2303 				 */
2304 				tnow = ddi_get_time();
2305 				if (tnow - vlun->svl_wfa_time >= 60) {
2306 					vlun->svl_waiting_for_activepath = 0;
2307 				} else {
2308 					drv_usecwait(1000);
2309 					if (vlun->svl_waiting_for_activepath
2310 					    == 0) {
2311 						/*
2312 						 * an active path has come
2313 						 * online!
2314 						 */
2315 						goto try_again;
2316 					}
2317 				}
2318 			}
2319 			VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2320 			if (!held) {
2321 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2322 				    "!Lun not held\n"));
2323 				if (pgr_sema_held) {
2324 					sema_v(&vlun->svl_pgr_sema);
2325 				}
2326 				return (TRAN_BUSY);
2327 			}
2328 			/*
2329 			 * now that the LUN is stable, one last check
2330 			 * to make sure no other changes sneaked in
2331 			 * (like a path coming online or a
2332 			 * failover initiated by another thread)
2333 			 */
2334 			pip = NULL;
2335 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2336 			    0, NULL, &pip);
2337 			if (pip != NULL) {
2338 				VHCI_RELEASE_LUN(vlun);
2339 				vlun->svl_waiting_for_activepath = 0;
2340 				goto bind_path;
2341 			}
2342 
2343 			/*
2344 			 * Check if there is an ONLINE path OR a STANDBY path
2345 			 * available. If none is available, do not attempt
2346 			 * to do a failover, just return a fatal error at this
2347 			 * point.
2348 			 */
2349 			npip = NULL;
2350 			rval = mdi_select_path(cdip, NULL,
2351 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2352 			    NULL, &npip);
2353 			if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2354 				/*
2355 				 * No paths available; just return a FATAL error.
2356 				 */
2357 				VHCI_RELEASE_LUN(vlun);
2358 				if (pgr_sema_held) {
2359 					sema_v(&vlun->svl_pgr_sema);
2360 				}
2361 				return (TRAN_FATAL_ERROR);
2362 			}
2363 			mdi_rele_path(npip);
2364 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
2365 			    "mdi_failover\n"));
2366 			rval = mdi_failover(vhci->vhci_dip, cdip,
2367 			    MDI_FAILOVER_ASYNC);
2368 			if (rval == MDI_FAILURE) {
2369 				VHCI_RELEASE_LUN(vlun);
2370 				if (pgr_sema_held) {
2371 					sema_v(&vlun->svl_pgr_sema);
2372 				}
2373 				return (TRAN_FATAL_ERROR);
2374 			} else if (rval == MDI_BUSY) {
2375 				VHCI_RELEASE_LUN(vlun);
2376 				if (pgr_sema_held) {
2377 					sema_v(&vlun->svl_pgr_sema);
2378 				}
2379 				return (TRAN_BUSY);
2380 			} else {
2381 				if (pgr_sema_held) {
2382 					sema_v(&vlun->svl_pgr_sema);
2383 				}
2384 				return (TRAN_BUSY);
2385 			}
2386 		}
2387 		vlun->svl_waiting_for_activepath = 0;
2388 bind_path:
2389 		vpkt->vpkt_path = pip;
2390 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2391 		ASSERT(svp != NULL);
2392 
2393 		psd = svp->svp_psd;
2394 		ASSERT(psd != NULL);
2395 		address = &psd->sd_address;
2396 	} else {
2397 		pkt = vpkt->vpkt_hba_pkt;
2398 		address = &pkt->pkt_address;
2399 	}
2400 
2401 	/* Verify match of specified path_instance and selected path_instance */
2402 	ASSERT((path_instance == 0) ||
2403 	    (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));
2404 
2405 	/*
2406 	 * For PKT_PARTIAL_DMA case, call pHCI's scsi_init_pkt whenever
2407 	 * target driver calls vhci_scsi_init_pkt.
2408 	 */
2409 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
2410 	    vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
2411 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2412 		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2413 		    "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
2414 		    (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
2415 		pkt = vpkt->vpkt_hba_pkt;
2416 		address = &pkt->pkt_address;
2417 	}
2418 
2419 	if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
2420 		pkt = scsi_init_pkt(address, pkt,
2421 		    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
2422 		    vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL);
2423 
2424 		if (pkt == NULL) {
2425 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2426 			    "!bind transport: 0x%p 0x%p 0x%p\n",
2427 			    (void *)vhci, (void *)psd, (void *)vpkt));
2428 			if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
2429 				MDI_PI_ERRSTAT(vpkt->vpkt_path,
2430 				    MDI_PI_TRANSERR);
2431 				mdi_rele_path(vpkt->vpkt_path);
2432 				vpkt->vpkt_path = NULL;
2433 			}
2434 			if (pgr_sema_held) {
2435 				sema_v(&vlun->svl_pgr_sema);
2436 			}
2437 			/*
2438 			 * Consider it a fatal error if b_error is set
2439 			 * as a result of a DMA binding failure, rather
2440 			 * than a condition of being temporarily out of
2441 			 * some resource.
2442 			 */
2443 			if (vpkt->vpkt_tgt_init_bp == NULL ||
2444 			    geterror(vpkt->vpkt_tgt_init_bp))
2445 				return (TRAN_FATAL_ERROR);
2446 			else
2447 				return (TRAN_BUSY);
2448 		}
2449 	}
2450 
2451 	pkt->pkt_private = vpkt;
2452 	vpkt->vpkt_hba_pkt = pkt;
2453 	return (TRAN_ACCEPT);
2454 }
2455 
2456 
2457 /*PRINTFLIKE3*/
2458 void
2459 vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
2460 {
2461 	char		buf[256];
2462 	va_list		ap;
2463 
2464 	va_start(ap, fmt);
2465 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
2466 	va_end(ap);
2467 
2468 	scsi_log(dip, "scsi_vhci", level, buf);
2469 }
2470 
2471 /* do a PGR out with the information we've saved away */
2472 static int
2473 vhci_do_prout(scsi_vhci_priv_t *svp)
2474 {
2476 	struct scsi_pkt			*new_pkt;
2477 	struct buf			*bp;
2478 	scsi_vhci_lun_t			*vlun = svp->svp_svl;
2479 	int				rval, retry, nr_retry, ua_retry;
2480 	uint8_t				*sns, skey;
2481 
2482 	bp = getrbuf(KM_SLEEP);
2483 	bp->b_flags = B_WRITE;
2484 	bp->b_resid = 0;
2485 	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2486 	bp->b_bcount = vlun->svl_bcount;
2487 
2488 	VHCI_INCR_PATH_CMDCOUNT(svp);
2489 
2490 	new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
2491 	    CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
2492 	    SLEEP_FUNC, NULL);
2493 	if (new_pkt == NULL) {
2494 		VHCI_DECR_PATH_CMDCOUNT(svp);
2495 		freerbuf(bp);
2496 		cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
2497 		return (0);
2498 	}
2499 	mutex_enter(&vlun->svl_mutex);
2500 	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2501 	bp->b_bcount = vlun->svl_bcount;
2502 	bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp,
2503 	    sizeof (vlun->svl_cdb));
2504 	new_pkt->pkt_time = vlun->svl_time;
2505 	mutex_exit(&vlun->svl_mutex);
2506 	new_pkt->pkt_flags = FLAG_NOINTR;
2507 
2508 	ua_retry = nr_retry = retry = 0;
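	/*
	 * NOT READY and UNIT ATTENTION sense are retried on separate
	 * budgets: nr_retry is bounded by vhci_prout_not_ready_retry,
	 * ua_retry by VHCI_MAX_PGR_RETRIES.
	 */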
2509 again:
2510 	rval = vhci_do_scsi_cmd(new_pkt);
2511 	if (rval != 1) {
2512 		if ((new_pkt->pkt_reason == CMD_CMPLT) &&
2513 		    (SCBP_C(new_pkt) == STATUS_CHECK) &&
2514 		    (new_pkt->pkt_state & STATE_ARQ_DONE)) {
2515 			sns = (uint8_t *)
2516 			    &(((struct scsi_arq_status *)(uintptr_t)
2517 			    (new_pkt->pkt_scbp))->sts_sensedata);
2518 			skey = scsi_sense_key(sns);
2519 			if ((skey == KEY_UNIT_ATTENTION) ||
2520 			    (skey == KEY_NOT_READY)) {
2521 				int max_retry;
2522 				struct scsi_failover_ops *fops;
2523 				fops = vlun->svl_fops;
2524 				rval = fops->sfo_analyze_sense(svp->svp_psd,
2525 				    sns, vlun->svl_fops_ctpriv);
2526 				if (rval == SCSI_SENSE_NOT_READY) {
2527 					max_retry = vhci_prout_not_ready_retry;
2528 					retry = nr_retry++;
2529 					delay(1*drv_usectohz(1000000));
2530 				} else {
2531 					/* chk for state change and update */
2532 					if (rval == SCSI_SENSE_STATE_CHANGED) {
2533 						int held;
2534 						VHCI_HOLD_LUN(vlun,
2535 						    VH_NOSLEEP, held);
2536 						if (!held) {
2537 							rval = TRAN_BUSY;
2538 						} else {
2539 							/* chk for alua first */
2540 							vhci_update_pathstates(
2541 							    (void *)vlun);
2542 						}
2543 					}
2544 					retry = ua_retry++;
2545 					max_retry = VHCI_MAX_PGR_RETRIES;
2546 				}
2547 				if (retry < max_retry) {
2548 					VHCI_DEBUG(4, (CE_WARN, NULL,
2549 					    "!vhci_do_prout retry 0x%x "
2550 					    "(0x%x 0x%x 0x%x)",
2551 					    SCBP_C(new_pkt),
2552 					    new_pkt->pkt_cdbp[0],
2553 					    new_pkt->pkt_cdbp[1],
2554 					    new_pkt->pkt_cdbp[2]));
2555 					goto again;
2556 				}
2557 				rval = 0;
2558 				VHCI_DEBUG(4, (CE_WARN, NULL,
2559 				    "!vhci_do_prout 0x%x "
2560 				    "(0x%x 0x%x 0x%x)",
2561 				    SCBP_C(new_pkt),
2562 				    new_pkt->pkt_cdbp[0],
2563 				    new_pkt->pkt_cdbp[1],
2564 				    new_pkt->pkt_cdbp[2]));
2565 			} else if (skey == KEY_ILLEGAL_REQUEST)
2566 				rval = VHCI_PGR_ILLEGALOP;
2567 		}
2568 	} else {
2569 		rval = 1;
2570 	}
2571 	scsi_destroy_pkt(new_pkt);
2572 	VHCI_DECR_PATH_CMDCOUNT(svp);
2573 	freerbuf(bp);
2574 	return (rval);
2575 }
2576 
2577 static void
2578 vhci_run_cmd(void *arg)
2579 {
2580 	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
2581 	struct scsi_pkt		*tpkt;
2582 	scsi_vhci_priv_t	*svp;
2583 	mdi_pathinfo_t		*pip, *npip;
2584 	scsi_vhci_lun_t		*vlun;
2585 	dev_info_t		*cdip;
2586 	scsi_vhci_priv_t	*nsvp;
2587 	int			fail = 0;
2588 	int			rval;
2589 	struct vhci_pkt		*vpkt;
2590 	uchar_t			cdb_1;
2591 	vhci_prout_t		*prout;
2592 
2593 	vpkt = (struct vhci_pkt *)pkt->pkt_private;
2594 	tpkt = vpkt->vpkt_tgt_pkt;
2595 	pip = vpkt->vpkt_path;
2596 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2597 	if (svp == NULL) {
2598 		tpkt->pkt_reason = CMD_TRAN_ERR;
2599 		tpkt->pkt_statistics = STAT_ABORTED;
2600 		goto done;
2601 	}
2602 	vlun = svp->svp_svl;
2603 	prout = &vlun->svl_prout;
2604 	if (SCBP_C(pkt) != STATUS_GOOD)
2605 		fail++;
2606 	cdip = vlun->svl_dip;
2607 	pip = npip = NULL;
2608 	rval = mdi_select_path(cdip, NULL,
2609 	    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip);
2610 	if ((rval != MDI_SUCCESS) || (npip == NULL)) {
2611 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2612 		    "vhci_run_cmd: no path! 0x%p\n", (void *)svp));
2613 		tpkt->pkt_reason = CMD_TRAN_ERR;
2614 		tpkt->pkt_statistics = STAT_ABORTED;
2615 		goto done;
2616 	}
2617 
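	/*
	 * Temporarily replace the PROUT service action (the low five
	 * bits of CDB byte 1) with REGISTER AND IGNORE EXISTING KEY
	 * while the registration is replicated down the remaining
	 * paths; the original byte is restored after the loop below.
	 */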
2618 	cdb_1 = vlun->svl_cdb[1];
2619 	vlun->svl_cdb[1] &= 0xe0;
2620 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
2621 
2622 	do {
2623 		nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
2624 		if (nsvp == NULL) {
2625 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2626 			    "vhci_run_cmd: no "
2627 			    "client priv! 0x%p offlined?\n",
2628 			    (void *)npip));
2629 			goto next_path;
2630 		}
2631 		if (vlun->svl_first_path == npip) {
2632 			goto next_path;
2633 		} else {
2634 			if (vhci_do_prout(nsvp) != 1)
2635 				fail++;
2636 		}
2637 next_path:
2638 		pip = npip;
2639 		rval = mdi_select_path(cdip, NULL,
2640 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
2641 		    pip, &npip);
2642 		mdi_rele_path(pip);
2643 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
2644 
2645 	vlun->svl_cdb[1] = cdb_1;
2646 
2647 	if (fail) {
2648 		VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, "
2649 		    "couldn't be replicated on all paths",
2650 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
2651 		vhci_print_prout_keys(vlun, "vhci_run_cmd: ");
2652 
2653 		if (SCBP_C(pkt) != STATUS_GOOD) {
2654 			tpkt->pkt_reason = CMD_TRAN_ERR;
2655 			tpkt->pkt_statistics = STAT_ABORTED;
2656 		}
2657 	} else {
2658 		vlun->svl_pgr_active = 1;
2659 		vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:");
2660 
2661 		bcopy((const void *)prout->service_key,
2662 		    (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE);
2663 		bcopy((const void *)prout->res_key,
2664 		    (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE);
2665 
2666 		vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:");
2667 	}
2668 done:
2669 	if (SCBP_C(pkt) == STATUS_GOOD)
2670 		vlun->svl_first_path = NULL;
2671 
2672 	if (svp)
2673 		VHCI_DECR_PATH_CMDCOUNT(svp);
2674 
2675 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
2676 		scsi_destroy_pkt(pkt);
2677 		vpkt->vpkt_hba_pkt = NULL;
2678 		if (vpkt->vpkt_path) {
2679 			mdi_rele_path(vpkt->vpkt_path);
2680 			vpkt->vpkt_path = NULL;
2681 		}
2682 	}
2683 
2684 	sema_v(&vlun->svl_pgr_sema);
2685 	/*
2686 	 * The PROUT commands are not included in the automatic retry
2687 	 * mechanism, therefore, vpkt_org_vpkt should never be set here.
2688 	 */
2689 	ASSERT(vpkt->vpkt_org_vpkt == NULL);
2690 	scsi_hba_pkt_comp(tpkt);
2691 }
2692 
2693 /*
2694  * Get the keys registered with this target.  Since we will have
2695  * registered the same key with multiple initiators, strip out
2696  * any duplicate keys.
2697  *
2698  * The pointers which will be used to filter the registered keys from
2699  * the device will be stored in filter_prin and filter_pkt.  If the
2700  * allocation length of the buffer was sufficient for the number of
2701  * parameter data bytes available to be returned by the device then the
2702  * key filtering will use the keylist returned from the original
2703  * request.  If the allocation length of the buffer was not sufficient,
2704  * then the filtering will use the keylist returned from the request
2705  * that is resent below.
2706  *
2707  * If the device returns an additional length field that is greater than
2708  * the allocation length of the buffer, then allocate a new buffer which
2709  * can accommodate the number of parameter data bytes available to be
2710  * returned.  Resend the scsi PRIN command, filter out the duplicate
2711  * keys, return as many of the unique keys found as were originally
2712  * requested, and set the additional length field equal to the data bytes
2713  * of unique reservation keys available to be returned.
2714  *
2715  * If the device returns an additional length field that is less than or
2716  * equal to the allocation length of the buffer, then all the available
2717  * keys registered were returned by the device.  Filter out the
2718  * duplicate keys and return all of the unique keys found and set the
2719  * additional length field equal to the data bytes of the reservation
2720  * keys to be returned.
2721  */
2722 
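/*
 * Size of the PRIN READ KEYS header that precedes the key list: the
 * 4-byte PRGENERATION and 4-byte ADDITIONAL LENGTH fields.
 */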
2723 #define	VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation))
2724 
2725 static int
2726 vhci_do_prin(struct vhci_pkt **intr_vpkt)
2727 {
2728 	scsi_vhci_priv_t *svp;
2729 	struct vhci_pkt *vpkt = *intr_vpkt;
2730 	vhci_prin_readkeys_t *prin;
2731 	scsi_vhci_lun_t *vlun;
2732 	struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address);
2733 
2734 	struct buf		*new_bp = NULL;
2735 	struct scsi_pkt		*new_pkt = NULL;
2736 	struct vhci_pkt		*new_vpkt = NULL;
2737 	uint32_t		needed_length;
2738 	int			rval = VHCI_CMD_CMPLT;
2739 	uint32_t		prin_length = 0;
2740 	uint32_t		svl_prin_length = 0;
2741 
2742 	ASSERT(vpkt->vpkt_path);
2743 	svp = mdi_pi_get_vhci_private(vpkt->vpkt_path);
2744 	ASSERT(svp);
2745 	vlun = svp->svp_svl;
2746 	ASSERT(vlun);
2747 
2748 	/*
2749 	 * If the caller only asked for an amount of data that would not
2750 	 * be enough to include any key data, it is likely that it will
2751 	 * send the next command with a buffer size based on the information
2752 	 * from this header.  Doing recovery on this request would duplicate
2753 	 * that effort.
2754 	 */
2755 	if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) {
2756 		rval = VHCI_CMD_CMPLT;
2757 		goto exit;
2758 	}
2759 
2760 	if (vpkt->vpkt_org_vpkt == NULL) {
2761 		/*
2762 		 * Can fail as sleep is not allowed.
2763 		 */
2764 		prin = (vhci_prin_readkeys_t *)
2765 		    bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
2766 	} else {
2767 		/*
2768 		 * The retry buf doesn't need to be mapped in.
2769 		 */
2770 		prin = (vhci_prin_readkeys_t *)
2771 		    vpkt->vpkt_tgt_init_bp->b_un.b_daddr;
2772 	}
2773 
2774 	if (prin == NULL) {
2775 		VHCI_DEBUG(5, (CE_WARN, NULL,
2776 		    "vhci_do_prin: bp_mapin_common failed."));
2777 		rval = VHCI_CMD_ERROR;
2778 		goto fail;
2779 	}
2780 
2781 	prin_length = BE_32(prin->length);
2782 
2783 	/*
2784 	 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
2785 	 * information to be transferred exceeds the maximum value
2786 	 * that the ALLOCATION LENGTH field is capable of specifying,
2787 	 * the device server shall...terminate the command with CHECK
2788 	 * CONDITION status".  The ALLOCATION LENGTH field of the
2789 	 * PERSISTENT RESERVE IN command is 2 bytes. We should never
2790 	 * get here with an ADDITIONAL LENGTH greater than 0xFFFF
2791 	 * so if we do, then it is an error!
2792 	 */
2793 
2795 	if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) {
2796 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2797 		    "vhci_do_prin: Device returned invalid "
2798 		    "length 0x%x\n", prin_length));
2799 		rval = VHCI_CMD_ERROR;
2800 		goto fail;
2801 	}
2802 	needed_length = prin_length + VHCI_PRIN_HEADER_SZ;
2803 
2804 	/*
2805 	 * If prin->length is greater than the byte count allocated in the
2806 	 * original buffer, then resend the request with enough buffer
2807 	 * allocated to get all of the available registered keys.
2808 	 */
2809 	if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) &&
2810 	    (vpkt->vpkt_org_vpkt == NULL)) {
2811 
2812 		new_pkt = vhci_create_retry_pkt(vpkt);
2813 		if (new_pkt == NULL) {
2814 			rval = VHCI_CMD_ERROR;
2815 			goto fail;
2816 		}
2817 		new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2818 
2819 		/*
2820 		 * This is the buf whose data buffer will receive
2821 		 * the PRIN READ KEYS parameter data returned from
2822 		 * the device.
2823 		 */
2824 		new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
2825 		    NULL, needed_length, B_READ, NULL_FUNC, NULL);
2826 		if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) {
2827 			if (new_bp) {
2828 				scsi_free_consistent_buf(new_bp);
2829 			}
2830 			vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2831 			rval = VHCI_CMD_ERROR;
2832 			goto fail;
2833 		}
2834 		new_bp->b_bcount = needed_length;
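		/* rewrite the two-byte PRIN ALLOCATION LENGTH, CDB bytes 7-8 */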
2835 		new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8);
2836 		new_pkt->pkt_cdbp[8] = (uchar_t)needed_length;
2837 
2838 		rval = VHCI_CMD_RETRY;
2839 
2840 		new_vpkt->vpkt_tgt_init_bp = new_bp;
2841 	}
2842 
2843 	if (rval == VHCI_CMD_RETRY) {
2845 		/*
2846 		 * There were more keys than the original request asked for.
2847 		 */
2848 		mdi_pathinfo_t *path_holder = vpkt->vpkt_path;
2849 
2850 		/*
2851 		 * Release the old path because it does not matter which path
2852 		 * this command is sent down.  This allows the normal bind
2853 		 * transport mechanism to be used.
2854 		 */
2855 		if (vpkt->vpkt_path != NULL) {
2856 			mdi_rele_path(vpkt->vpkt_path);
2857 			vpkt->vpkt_path = NULL;
2858 		}
2859 
2860 		/*
2861 		 * Dispatch the retry command
2862 		 */
2863 		if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2864 		    (void *) new_vpkt, KM_NOSLEEP) == NULL) {
2865 			if (path_holder) {
2866 				vpkt->vpkt_path = path_holder;
2867 				mdi_hold_path(path_holder);
2868 			}
2869 			scsi_free_consistent_buf(new_bp);
2870 			vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2871 			rval = VHCI_CMD_ERROR;
2872 			goto fail;
2873 		}
2874 
2875 		/*
2876 		 * If we return VHCI_CMD_RETRY, that means the caller
2877 		 * is going to bail and wait for the reissued command
2878 		 * to complete.  In that case, we need to decrement
2879 		 * the path command count right now.  In any other
2880 		 * case, it'll be decremented by the caller.
2881 		 */
2882 		VHCI_DECR_PATH_CMDCOUNT(svp);
2883 		goto exit;
2884 
2885 	}
2886 
2887 	if (rval == VHCI_CMD_CMPLT) {
2888 		/*
2889 		 * Either the original request got all of the keys or this
2890 		 * is the completion of the recovery packet.
2891 		 */
2892 		int new;
2893 		int old;
2894 		int num_keys = prin_length / MHIOC_RESV_KEY_SIZE;
2895 
2896 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
2897 		    num_keys));
2898 
2899 #ifdef DEBUG
2900 		VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
2901 		if (vhci_debug == 5)
2902 			vhci_print_prin_keys(prin, num_keys);
2903 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2904 		    "vhci_do_prin: MPxIO old keys:\n"));
2905 		if (vhci_debug == 5)
2906 			vhci_print_prin_keys(&vlun->svl_prin, num_keys);
2907 #endif
2908 
2909 		/*
2910 		 * Filter out all duplicate keys returned from the device
2911 		 * We know that we use a different key for every host, so we
2912 		 * can simply strip out duplicates. Otherwise we would need to
2913 		 * do more bookkeeping to figure out which keys to strip out.
2914 		 */
2915 
2916 		new = 0;
2917 
2918 		/*
2919 		 * If we got at least 1 key copy it.
2920 		 */
2921 		if (num_keys > 0) {
2922 			vlun->svl_prin.keylist[0] = prin->keylist[0];
2923 			new++;
2924 		}
2925 
2926 		/*
2927 		 * find next unique key.
2928 		 */
2929 		for (old = 1; old < num_keys; old++) {
2930 			int j;
2931 			int match = 0;
2932 
2933 			if (new >= VHCI_NUM_RESV_KEYS)
2934 				break;
2935 			for (j = 0; j < new; j++) {
2936 				if (bcmp(&prin->keylist[old],
2937 				    &vlun->svl_prin.keylist[j],
2938 				    sizeof (mhioc_resv_key_t)) == 0) {
2939 					match = 1;
2940 					break;
2941 				}
2942 			}
2943 			if (!match) {
2944 				vlun->svl_prin.keylist[new] =
2945 				    prin->keylist[old];
2946 				new++;
2947 			}
2948 		}
2949 
2950 		/* Stored Big Endian */
2951 		vlun->svl_prin.generation = prin->generation;
2952 		svl_prin_length = new * sizeof (mhioc_resv_key_t);
2953 		/* Stored Big Endian */
2954 		vlun->svl_prin.length = BE_32(svl_prin_length);
2955 		svl_prin_length += VHCI_PRIN_HEADER_SZ;
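		/*
		 * For example, three unique 8-byte keys give an ADDITIONAL
		 * LENGTH of 24 data bytes, or a 32-byte transfer once the
		 * 8-byte header is included.
		 */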
2956 
2957 		/*
2958 		 * If we arrived at this point after issuing a retry, make sure
2959 		 * that we put everything back the way it originally was so
2960 		 * that the target driver can complete the command correctly.
2961 		 */
2962 		if (vpkt->vpkt_org_vpkt != NULL) {
2963 			new_bp = vpkt->vpkt_tgt_init_bp;
2964 
2965 			scsi_free_consistent_buf(new_bp);
2966 
2967 			vpkt = vhci_sync_retry_pkt(vpkt);
2968 			*intr_vpkt = vpkt;
2969 
2970 			/*
2971 			 * Make sure the original buffer is mapped into kernel
2972 			 * space before we try to copy the filtered keys into
2973 			 * it.
2974 			 */
2975 			prin = (vhci_prin_readkeys_t *)bp_mapin_common(
2976 			    vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
2977 		}
2978 
2979 		/*
2980 		 * Now copy the desired number of prin keys into the original
2981 		 * target buffer.
2982 		 */
2983 		if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) {
2984 			/*
2985 			 * It is safe to return all of the available unique
2986 			 * keys
2987 			 */
2988 			bcopy(&vlun->svl_prin, prin, svl_prin_length);
2989 		} else {
2990 			/*
2991 			 * Not all of the available keys were requested by the
2992 			 * original command.
2993 			 */
2994 			bcopy(&vlun->svl_prin, prin,
2995 			    vpkt->vpkt_tgt_init_bp->b_bcount);
2996 		}
2997 #ifdef DEBUG
2998 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2999 		    "vhci_do_prin: To Application:\n"));
3000 		if (vhci_debug == 5)
3001 			vhci_print_prin_keys(prin, new);
3002 		VHCI_DEBUG(5, (CE_NOTE, NULL,
3003 		    "vhci_do_prin: MPxIO new keys:\n"));
3004 		if (vhci_debug == 5)
3005 			vhci_print_prin_keys(&vlun->svl_prin, new);
3006 #endif
3007 	}
3008 fail:
3009 	if (rval == VHCI_CMD_ERROR) {
3010 		/*
3011 		 * If we arrived at this point after issuing a
3012 		 * retry, make sure that we put everything back
3013 		 * the way it originally was so that ssd can
3014 		 * complete the command correctly.
3015 		 */
3016 
3017 		if (vpkt->vpkt_org_vpkt != NULL) {
3018 			new_bp = vpkt->vpkt_tgt_init_bp;
3019 			if (new_bp != NULL) {
3020 				scsi_free_consistent_buf(new_bp);
3021 			}
3022 
3023 			new_vpkt = vpkt;
3024 			vpkt = vpkt->vpkt_org_vpkt;
3025 
3026 			vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3027 			    new_vpkt->vpkt_tgt_pkt);
3028 		}
3029 
3030 		/*
3031 		 * Mark this command completion as having an error so that
3032 		 * ssd will retry the command.
3033 		 */
3034 
3035 		vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3036 		vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3037 
3038 		rval = VHCI_CMD_CMPLT;
3039 	}
3040 exit:
3041 	/*
3042 	 * Make sure that the semaphore is only released once.
3043 	 */
3044 	if (rval == VHCI_CMD_CMPLT) {
3045 		sema_v(&vlun->svl_pgr_sema);
3046 	}
3047 
3048 	return (rval);
3049 }
3050 
3051 static void
3052 vhci_intr(struct scsi_pkt *pkt)
3053 {
3054 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
3055 	struct scsi_pkt		*tpkt;
3056 	scsi_vhci_priv_t	*svp;
3057 	scsi_vhci_lun_t		*vlun;
3058 	int			rval, held;
3059 	struct scsi_failover_ops	*fops;
3060 	uint8_t			*sns, skey, asc, ascq;
3061 	mdi_pathinfo_t		*lpath;
3062 	static char		*timeout_err = "Command Timeout";
3063 	static char		*parity_err = "Parity Error";
3064 	char			*err_str = NULL;
3065 	dev_info_t		*vdip, *cdip;
3066 	char			*cpath;
3067 
3068 	ASSERT(vpkt != NULL);
3069 	tpkt = vpkt->vpkt_tgt_pkt;
3070 	ASSERT(tpkt != NULL);
3071 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3072 	ASSERT(svp != NULL);
3073 	vlun = svp->svp_svl;
3074 	ASSERT(vlun != NULL);
3075 	lpath = vpkt->vpkt_path;
3076 
3077 	/*
3078 	 * sync up the target driver's pkt with the pkt that
3079 	 * we actually used
3080 	 */
3081 	*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
3082 	tpkt->pkt_resid = pkt->pkt_resid;
3083 	tpkt->pkt_state = pkt->pkt_state;
3084 	tpkt->pkt_statistics = pkt->pkt_statistics;
3085 	tpkt->pkt_reason = pkt->pkt_reason;
3086 
3087 	/* Return path_instance information back to the target driver. */
3088 	if (scsi_pkt_allocated_correctly(tpkt)) {
3089 		if (scsi_pkt_allocated_correctly(pkt)) {
3090 			/*
3091 			 * If both packets were correctly allocated,
3092 			 * return path returned by pHCI.
3093 			 */
3094 			tpkt->pkt_path_instance = pkt->pkt_path_instance;
3095 		} else {
3096 			/* Otherwise return path of pHCI we used */
3097 			tpkt->pkt_path_instance =
3098 			    mdi_pi_get_path_instance(lpath);
3099 		}
3100 	}
3101 
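	/*
	 * On a failed PROUT REGISTER or PRIN the per-LUN PGR semaphore
	 * is released here; on success it is released later, by
	 * vhci_run_cmd() or vhci_do_prin() respectively.
	 */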
3102 	if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3103 	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3104 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3105 		if ((SCBP_C(pkt) != STATUS_GOOD) ||
3106 		    (pkt->pkt_reason != CMD_CMPLT)) {
3107 			sema_v(&vlun->svl_pgr_sema);
3108 		}
3109 	} else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3110 		if (pkt->pkt_reason != CMD_CMPLT ||
3111 		    (SCBP_C(pkt) != STATUS_GOOD)) {
3112 			sema_v(&vlun->svl_pgr_sema);
3113 		}
3114 	}
3115 
3116 	switch (pkt->pkt_reason) {
3117 	case CMD_CMPLT:
3118 		/*
3119 		 * cmd completed successfully, check for scsi errors
3120 		 */
3121 		switch (*(pkt->pkt_scbp)) {
3122 		case STATUS_CHECK:
3123 			if (pkt->pkt_state & STATE_ARQ_DONE) {
3124 				sns = (uint8_t *)
3125 				    &(((struct scsi_arq_status *)(uintptr_t)
3126 				    (pkt->pkt_scbp))->sts_sensedata);
3127 				skey = scsi_sense_key(sns);
3128 				asc = scsi_sense_asc(sns);
3129 				ascq = scsi_sense_ascq(sns);
3130 				fops = vlun->svl_fops;
3131 				ASSERT(fops != NULL);
3132 				VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3133 				    "Received sns key %x  esc %x  escq %x\n",
3134 				    skey, asc, ascq));
3135 
3136 				if (vlun->svl_waiting_for_activepath == 1) {
3137 					/*
3138 					 * if we are here it means we are
3139 					 * in the midst of a probe/attach
3140 					 * through a passive path; this
3141 					 * case is exempt from sense analysis
3142 					 * for detection of ext. failover
3143 					 * because that would unnecessarily
3144 					 * increase attach time.
3145 					 */
3146 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3147 					    vpkt->vpkt_tgt_init_scblen);
3148 					break;
3149 				}
3150 				if (asc == VHCI_SCSI_PERR) {
3151 					/*
3152 					 * parity error
3153 					 */
3154 					err_str = parity_err;
3155 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3156 					    vpkt->vpkt_tgt_init_scblen);
3157 					break;
3158 				}
3159 				rval = fops->sfo_analyze_sense(svp->svp_psd,
3160 				    sns, vlun->svl_fops_ctpriv);
3161 				if ((rval == SCSI_SENSE_NOFAILOVER) ||
3162 				    (rval == SCSI_SENSE_UNKNOWN) ||
3163 				    (rval == SCSI_SENSE_NOT_READY)) {
3164 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3165 					    vpkt->vpkt_tgt_init_scblen);
3166 					break;
3167 				} else if (rval == SCSI_SENSE_STATE_CHANGED) {
3168 					struct scsi_vhci	*vhci;
3169 					vhci = ADDR2VHCI(&tpkt->pkt_address);
3170 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3171 					if (!held) {
3172 						/*
3173 						 * looks like some other thread
3174 						 * has already detected this
3175 						 * condition
3176 						 */
3177 						tpkt->pkt_state &=
3178 						    ~STATE_ARQ_DONE;
3179 						*(tpkt->pkt_scbp) =
3180 						    STATUS_BUSY;
3181 						break;
3182 					}
3183 					(void) taskq_dispatch(
3184 					    vhci->vhci_update_pathstates_taskq,
3185 					    vhci_update_pathstates,
3186 					    (void *)vlun, KM_SLEEP);
3187 				} else {
3188 					/*
3189 					 * externally initiated failover
3190 					 * has occurred or is in progress
3191 					 */
3192 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3193 					if (!held) {
3194 						/*
3195 						 * looks like some other thread
3196 						 * has already detected this
3197 						 * condition
3198 						 */
3199 						tpkt->pkt_state &=
3200 						    ~STATE_ARQ_DONE;
3201 						*(tpkt->pkt_scbp) =
3202 						    STATUS_BUSY;
3203 						break;
3204 					} else {
3205 						rval = vhci_handle_ext_fo
3206 						    (pkt, rval);
3207 						if (rval == BUSY_RETURN) {
3208 							tpkt->pkt_state &=
3209 							    ~STATE_ARQ_DONE;
3210 							*(tpkt->pkt_scbp) =
3211 							    STATUS_BUSY;
3212 							break;
3213 						}
3214 						bcopy(pkt->pkt_scbp,
3215 						    tpkt->pkt_scbp,
3216 						    vpkt->vpkt_tgt_init_scblen);
3217 						break;
3218 					}
3219 				}
3220 			}
3221 			break;
3222 
3223 		/*
3224 		 * If this is a good SCSI-II RELEASE cmd completion then restore
3225 		 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
3226 		 * If this is a good SCSI-II RESERVE cmd completion then set
3227 		 * VLUN_RESERVE_ACTIVE_FLG.
3228 		 */
3229 		case STATUS_GOOD:
3230 			if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3231 			    (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3232 				(void) mdi_set_lb_policy(vlun->svl_dip,
3233 				    vlun->svl_lb_policy_save);
3234 				vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3235 				VHCI_DEBUG(1, (CE_WARN, NULL,
3236 				    "!vhci_intr: vlun 0x%p release path 0x%p",
3237 				    (void *)vlun, (void *)vpkt->vpkt_path));
3238 			}
3239 
3240 			if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3241 			    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3242 				vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3243 				vlun->svl_resrv_pip = vpkt->vpkt_path;
3244 				VHCI_DEBUG(1, (CE_WARN, NULL,
3245 				    "!vhci_intr: vlun 0x%p reserved path 0x%p",
3246 				    (void *)vlun, (void *)vpkt->vpkt_path));
3247 			}
3248 			break;
3249 
3250 		case STATUS_RESERVATION_CONFLICT:
3251 			VHCI_DEBUG(1, (CE_WARN, NULL,
3252 			    "!vhci_intr: vlun 0x%p "
3253 			    "reserve conflict on path 0x%p",
3254 			    (void *)vlun, (void *)vpkt->vpkt_path));
3255 			/* FALLTHROUGH */
3256 		default:
3257 			break;
3258 		}
3259 
3260 		/*
3261 		 * Update I/O completion statistics for the path
3262 		 */
3263 		mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3264 
3265 		/*
3266 		 * Command completed successfully, release the dma binding and
3267 		 * destroy the transport side of the packet.
3268 		 */
3269 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3270 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3271 		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3272 			if (SCBP_C(pkt) == STATUS_GOOD) {
3273 				ASSERT(vlun->svl_taskq);
3274 				svp->svp_last_pkt_reason = pkt->pkt_reason;
3275 				(void) taskq_dispatch(vlun->svl_taskq,
3276 				    vhci_run_cmd, pkt, KM_SLEEP);
3277 				return;
3278 			}
3279 		}
3280 		if ((SCBP_C(pkt) == STATUS_GOOD) &&
3281 		    (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) {
3282 			/*
3283 			 * If the action (value in byte 1 of the cdb) is zero,
3284 			 * we're reading keys, and that's the only condition
3285 			 * where we need to be concerned with filtering keys
3286 			 * and potential retries.  Otherwise, we simply signal
3287 			 * the semaphore and move on.
3288 			 */
3289 			if (pkt->pkt_cdbp[1] == 0) {
3290 				/*
3291 				 * If this is the completion of an internal
3292 				 * retry then we need to make sure that the
3293 				 * pkt and tpkt pointers are readjusted so
3294 				 * the calls to scsi_destroy_pkt and pkt_comp
3295 				 * below work correctly.
3296 				 */
3297 				if (vpkt->vpkt_org_vpkt != NULL) {
3298 					pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3299 					tpkt = vpkt->vpkt_org_vpkt->
3300 					    vpkt_tgt_pkt;
3301 
3302 					/*
3303 					 * If this command was issued through
3304 					 * the taskq then we need to clear
3305 					 * this flag for proper processing in
3306 					 * the case of a retry from the target
3307 					 * driver.
3308 					 */
3309 					vpkt->vpkt_state &=
3310 					    ~VHCI_PKT_THRU_TASKQ;
3311 				}
3312 
3313 				/*
3314 				 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3315 				 * vpkt will contain the address of the
3316 				 * original vpkt
3317 				 */
3318 				if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3319 					/*
3320 					 * The command has been resent to get
3321 					 * all the keys from the device.  Don't
3322 					 * complete the command with ssd until
3323 					 * the retry completes.
3324 					 */
3325 					return;
3326 				}
3327 			} else {
3328 				sema_v(&vlun->svl_pgr_sema);
3329 			}
3330 		}
3331 
3332 		break;
3333 
3334 	case CMD_TIMEOUT:
3335 		if ((pkt->pkt_statistics &
3336 		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
3337 
3338 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3339 			    "!scsi vhci timeout invoked\n"));
3340 
3341 			(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3342 			    FALSE, VHCI_DEPTH_ALL);
3343 		}
3344 		MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3345 		tpkt->pkt_statistics |= STAT_ABORTED;
3346 		err_str = timeout_err;
3347 		break;
3348 
3349 	case CMD_TRAN_ERR:
3350 		/*
3351 		 * This status is returned if the transport has sent the cmd
3352 		 * down the link to the target and then some error occurs.
3353 		 * In the case of a SCSI-II RESERVE cmd, we don't know whether
3354 		 * the reservation has been accepted by the target, so we need
3355 		 * to clear the reservation.
3356 		 */
3357 		if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3358 		    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3359 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3360 			    " cmd_tran_err for scsi-2 reserve cmd\n"));
3361 			if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3362 			    TRUE, VHCI_DEPTH_TARGET)) {
3363 				VHCI_DEBUG(1, (CE_WARN, NULL,
3364 				    "!vhci_intr cmd_tran_err reset failed!"));
3365 			}
3366 		}
3367 		break;
3368 
3369 	case CMD_DEV_GONE:
3370 		/*
3371 		 * If this is the last path then report CMD_DEV_GONE to the
3372 		 * target driver; otherwise report BUSY to trigger a retry.
3373 		 */
3374 		if (vlun->svl_dip &&
3375 		    (mdi_client_get_path_count(vlun->svl_dip) <= 1)) {
3376 			struct scsi_vhci	*vhci;
3377 			vhci = ADDR2VHCI(&tpkt->pkt_address);
3378 			VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3379 			    "cmd_dev_gone on last path\n"));
3380 			(void) vhci_invalidate_mpapi_lu(vhci, vlun);
3381 			break;
3382 		}
3383 
3384 		/* Report CMD_CMPLT-with-BUSY to cause retry. */
3385 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3386 		    "cmd_dev_gone\n"));
3387 		tpkt->pkt_reason = CMD_CMPLT;
3388 		tpkt->pkt_state = STATE_GOT_BUS |
3389 		    STATE_GOT_TARGET | STATE_SENT_CMD |
3390 		    STATE_GOT_STATUS;
3391 		*(tpkt->pkt_scbp) = STATUS_BUSY;
3392 		break;
3393 
3394 	default:
3395 		break;
3396 	}
3397 
3398 	/*
3399 	 * The SCSI-II RESERVE cmd has been serviced by the lower layers, so
3400 	 * clear the flag so the LUN is no longer QUIESCED.
3401 	 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3402 	 * is retried, a taskq is again dispatched to service it; otherwise
3403 	 * the retry could hang the system if it occurs in interrupt
3404 	 * context.
3405 	 */
3406 	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3407 	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3408 		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3409 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3410 	}
3411 
3412 	/*
3413 	 * vpkt_org_vpkt should always be NULL here if the retry command
3414 	 * has been successfully processed.  If vpkt_org_vpkt != NULL at
3415 	 * this point, it is an error so restore the original vpkt and
3416 	 * return an error to the target driver so it can retry the
3417 	 * command as appropriate.
3418 	 */
3419 	if (vpkt->vpkt_org_vpkt != NULL) {
3420 		struct vhci_pkt *new_vpkt = vpkt;
3421 		vpkt = vpkt->vpkt_org_vpkt;
3422 
3423 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3424 		    new_vpkt->vpkt_tgt_pkt);
3425 
3426 		/*
3427 		 * Mark this command completion as having an error so that
3428 		 * ssd will retry the command.
3429 		 */
3430 		vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3431 		vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3432 
3433 		pkt = vpkt->vpkt_hba_pkt;
3434 		tpkt = vpkt->vpkt_tgt_pkt;
3435 	}
3436 
3437 	if ((err_str != NULL) && (pkt->pkt_reason !=
3438 	    svp->svp_last_pkt_reason)) {
3439 		cdip = vlun->svl_dip;
3440 		vdip = ddi_get_parent(cdip);
3441 		cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3442 		vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s",
3443 		    ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
3444 		    ddi_get_instance(cdip), err_str,
3445 		    mdi_pi_spathname(vpkt->vpkt_path));
3446 		kmem_free(cpath, MAXPATHLEN);
3447 	}
3448 	svp->svp_last_pkt_reason = pkt->pkt_reason;
3449 	VHCI_DECR_PATH_CMDCOUNT(svp);
3450 
3451 	/*
3452 	 * For PARTIAL_DMA, vhci should not free the path.
3453 	 * Target driver will call into vhci_scsi_dmafree or
3454 	 * destroy pkt to release this path.
3455 	 */
3456 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
3457 		scsi_destroy_pkt(pkt);
3458 		vpkt->vpkt_hba_pkt = NULL;
3459 		if (vpkt->vpkt_path) {
3460 			mdi_rele_path(vpkt->vpkt_path);
3461 			vpkt->vpkt_path = NULL;
3462 		}
3463 	}
3464 
3465 	scsi_hba_pkt_comp(tpkt);
3466 }
3467 
3468 /*
3469  * Two possibilities: (1) failover has completed,
3470  * or (2) failover is in progress.  In the former
3471  * case, update our path states; in the latter,
3472  * initiate a scsi_watch request to determine when
3473  * the failover completes - the vlun is HELD until
3474  * failover completes.  BUSY is returned to the
3475  * upper layer in both cases.
3476  */
3477 static int
3478 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat)
3479 {
3480 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
3481 	struct scsi_pkt		*tpkt;
3482 	scsi_vhci_priv_t	*svp;
3483 	scsi_vhci_lun_t		*vlun;
3484 	struct scsi_vhci	*vhci;
3485 	scsi_vhci_swarg_t	*swarg;
3486 	char			*path;
3487 
3488 	ASSERT(vpkt != NULL);
3489 	tpkt = vpkt->vpkt_tgt_pkt;
3490 	ASSERT(tpkt != NULL);
3491 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3492 	ASSERT(svp != NULL);
3493 	vlun = svp->svp_svl;
3494 	ASSERT(vlun != NULL);
3495 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3496 
3497 	vhci = ADDR2VHCI(&tpkt->pkt_address);
3498 
3499 	if (fostat == SCSI_SENSE_INACTIVE) {
3500 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover "
3501 		    "detected for %s; updating path states...\n",
3502 		    vlun->svl_lun_wwn));
3503 		/*
3504 		 * set the vlun flag to indicate to the task that the target
3505 		 * port group needs updating
3506 		 */
3507 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3508 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3509 		    vhci_update_pathstates, (void *)vlun, KM_SLEEP);
3510 	} else {
3511 		path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3512 		vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip),
3513 		    "!%s (%s%d): Waiting for externally initiated failover "
3514 		    "to complete", ddi_pathname(vlun->svl_dip, path),
3515 		    ddi_driver_name(vlun->svl_dip),
3516 		    ddi_get_instance(vlun->svl_dip));
3517 		kmem_free(path, MAXPATHLEN);
3518 		swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP);
3519 		if (swarg == NULL) {
3520 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: "
3521 			    "request packet allocation for %s failed....\n",
3522 			    vlun->svl_lun_wwn));
3523 			VHCI_RELEASE_LUN(vlun);
3524 			return (PKT_RETURN);
3525 		}
3526 		swarg->svs_svp = svp;
3527 		swarg->svs_tos = ddi_get_time();
3528 		swarg->svs_pi = vpkt->vpkt_path;
3529 		swarg->svs_release_lun = 0;
3530 		swarg->svs_done = 0;
3531 		/*
3532 		 * place a hold on the path...we don't want it to
3533 		 * vanish while scsi_watch is in progress
3534 		 */
3535 		mdi_hold_path(vpkt->vpkt_path);
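     		/*
     		 * scsi_watch will now poll the device at
     		 * VHCI_FOWATCH_INTERVAL; vhci_efo_watch_cb examines each
     		 * result until failover completes or VHCI_EXTFO_TIMEOUT
     		 * expires.
     		 */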
3536 		svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
3537 		    VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3538 		    (caddr_t)swarg);
3539 	}
3540 	return (BUSY_RETURN);
3541 }
3542 
3543 /*
3544  * vhci_efo_watch_cb:
3545  *	Callback from scsi_watch request to check the failover status.
3546  *	Completion is either due to successful failover or timeout.
3547  *	Upon successful completion, vhci_update_pathstates is called.
3548  *	For the timeout condition, vhci_efo_done is called.
3549  *	Always returns 0 to scsi_watch to keep retrying until vhci_efo_done
3550  *	terminates this request properly in a separate thread.
3551  */
3552 
3553 static int
3554 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3555 {
3556 	struct scsi_status		*statusp = resultp->statusp;
3557 	uint8_t				*sensep = (uint8_t *)resultp->sensep;
3558 	struct scsi_pkt			*pkt = resultp->pkt;
3559 	scsi_vhci_swarg_t		*swarg;
3560 	scsi_vhci_priv_t		*svp;
3561 	scsi_vhci_lun_t			*vlun;
3562 	struct scsi_vhci		*vhci;
3563 	dev_info_t			*vdip;
3564 	int				rval, updt_paths;
3565 
3566 	swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3567 	svp = swarg->svs_svp;
3568 	if (swarg->svs_done) {
3569 		/*
3570 		 * Failover has already completed or timed out.
3571 		 * Waiting for vhci_efo_done to terminate this scsi_watch.
3572 		 */
3573 		return (0);
3574 	}
3575 
3576 	ASSERT(svp != NULL);
3577 	vlun = svp->svp_svl;
3578 	ASSERT(vlun != NULL);
3579 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3580 	vlun->svl_efo_update_path = 0;
3581 	vdip = ddi_get_parent(vlun->svl_dip);
3582 	vhci = ddi_get_soft_state(vhci_softstate,
3583 	    ddi_get_instance(vdip));
3584 
3585 	updt_paths = 0;
3586 
3587 	if (pkt->pkt_reason != CMD_CMPLT) {
3588 		if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3589 			swarg->svs_release_lun = 1;
3590 			goto done;
3591 		}
3592 		return (0);
3593 	}
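     	/*
     	 * Triage the SCSI status below: a CHECK CONDITION is passed to
     	 * the failover module's sense analysis; a RESERVATION CONFLICT
     	 * is treated as a sign that failover has settled; BUSY/QFULL
     	 * means keep polling.
     	 */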
3594 	if (*((unsigned char *)statusp) == STATUS_CHECK) {
3595 		rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep,
3596 		    vlun->svl_fops_ctpriv);
3597 		switch (rval) {
3598 			/*
3599 			 * Only update path states in case path is definitely
3600 			 * inactive, or no failover occurred.  For all other
3601 			 * check conditions, continue pinging.  An unexpected
3602 			 * check condition shouldn't cause pinging to complete
3603 			 * prematurely.
3604 			 */
3605 			case SCSI_SENSE_INACTIVE:
3606 			case SCSI_SENSE_NOFAILOVER:
3607 				updt_paths = 1;
3608 				break;
3609 			default:
3610 				if ((ddi_get_time() - swarg->svs_tos)
3611 				    >= VHCI_EXTFO_TIMEOUT) {
3612 					swarg->svs_release_lun = 1;
3613 					goto done;
3614 				}
3615 				return (0);
3616 		}
3617 	} else if (*((unsigned char *)statusp) ==
3618 	    STATUS_RESERVATION_CONFLICT) {
3619 		updt_paths = 1;
3620 	} else if ((*((unsigned char *)statusp)) &
3621 	    (STATUS_BUSY | STATUS_QFULL)) {
3622 		return (0);
3623 	}
3624 	if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3625 	    (updt_paths == 1)) {
3626 		/*
3627 		 * we got here because we had detected an
3628 		 * externally initiated failover; things
3629 		 * have settled down now, so let's
3630 		 * start up a task to update the
3631 		 * path states and target port group
3632 		 */
3633 		vlun->svl_efo_update_path = 1;
3634 		swarg->svs_done = 1;
3635 		vlun->svl_swarg = swarg;
3636 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3637 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3638 		    vhci_update_pathstates, (void *)vlun,
3639 		    KM_SLEEP);
3640 		return (0);
3641 	}
3642 	if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3643 		swarg->svs_release_lun = 1;
3644 		goto done;
3645 	}
3646 	return (0);
3647 done:
3648 	swarg->svs_done = 1;
3649 	(void) taskq_dispatch(vhci->vhci_taskq,
3650 	    vhci_efo_done, (void *)swarg, KM_SLEEP);
3651 	return (0);
3652 }
3653 
3654 /*
3655  * vhci_efo_done:
3656  *	Cleanly terminates scsi_watch and frees up resources.
3657  *	Called as a taskq function from vhci_efo_watch_cb for the EFO timeout
3658  *	condition, or by vhci_update_pathstates upon externally initiated
3659  *	failover completion.
3660  */
3661 static void
3662 vhci_efo_done(void *arg)
3663 {
3664 	scsi_vhci_lun_t			*vlun;
3665 	scsi_vhci_swarg_t		*swarg = (scsi_vhci_swarg_t *)arg;
3666 	scsi_vhci_priv_t		*svp = swarg->svs_svp;
3667 	ASSERT(svp);
3668 
3669 	vlun = svp->svp_svl;
3670 	ASSERT(vlun);
3671 
3672 	/* Wait for clean termination of scsi_watch */
3673 	(void) scsi_watch_request_terminate(svp->svp_sw_token,
3674 	    SCSI_WATCH_TERMINATE_ALL_WAIT);
3675 	svp->svp_sw_token = NULL;
3676 
3677 	/* release path and freeup resources to indicate failover completion */
3678 	mdi_rele_path(swarg->svs_pi);
3679 	if (swarg->svs_release_lun) {
3680 		VHCI_RELEASE_LUN(vlun);
3681 	}
3682 	kmem_free((void *)swarg, sizeof (*swarg));
3683 }
3684 
3685 /*
3686  * Update the path states.
3687  * vlun should be HELD when this is invoked.
3688  * Calls vhci_efo_done to clean up resources allocated for EFO.
3689  */
3690 void
3691 vhci_update_pathstates(void *arg)
3692 {
3693 	mdi_pathinfo_t			*pip, *npip;
3694 	dev_info_t			*dip;
3695 	struct scsi_failover_ops	*fo;
3696 	struct scsi_vhci_priv		*svp;
3697 	struct scsi_device		*psd;
3698 	struct scsi_path_opinfo		opinfo;
3699 	char				*pclass, *tptr;
3700 	struct scsi_vhci_lun		*vlun = (struct scsi_vhci_lun *)arg;
3701 	int				sps; /* mdi_select_path() status */
3702 	char				*cpath;
3703 	struct scsi_vhci		*vhci;
3704 	struct scsi_pkt			*pkt;
3705 	struct buf			*bp;
3706 	int				reserve_conflict = 0;
3707 
3708 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3709 	dip  = vlun->svl_dip;
3710 	pip = npip = NULL;
3711 
3712 	vhci = ddi_get_soft_state(vhci_softstate,
3713 	    ddi_get_instance(ddi_get_parent(dip)));
3714 
3715 	sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3716 	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
3717 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3718 		goto done;
3719 	}
3720 
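     	/*
     	 * Walk every ONLINE and STANDBY path (including non-preferred
     	 * paths): mdi_select_path() starts a walk when passed a NULL
     	 * 'pip' and returns the path after 'pip' on subsequent calls.
     	 */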
3721 	fo = vlun->svl_fops;
3722 	do {
3723 		pip = npip;
3724 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3725 		psd = svp->svp_psd;
3726 		if (fo->sfo_path_get_opinfo(psd, &opinfo,
3727 		    vlun->svl_fops_ctpriv) != 0) {
3728 			sps = mdi_select_path(dip, NULL,
3729 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3730 			    MDI_SELECT_NO_PREFERRED), pip, &npip);
3731 			mdi_rele_path(pip);
3732 			continue;
3733 		}
3734 
3735 		if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3736 		    MDI_SUCCESS) {
3737 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3738 			    "!vhci_update_pathstates: prop lookup failed for "
3739 			    "path 0x%p\n", (void *)pip));
3740 			sps = mdi_select_path(dip, NULL,
3741 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3742 			    MDI_SELECT_NO_PREFERRED), pip, &npip);
3743 			mdi_rele_path(pip);
3744 			continue;
3745 		}
3746 
3747 		/*
3748 		 * Need to update the "path-class" property
3749 		 * value in the device tree if different
3750 		 * from the existing value.
3751 		 */
3752 		if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3753 			(void) mdi_prop_update_string(pip, "path-class",
3754 			    opinfo.opinfo_path_attr);
3755 		}
3756 
3757 		/*
3758 		 * Only change the state if needed. i.e. Don't call
3759 		 * mdi_pi_set_state to ONLINE a path if its already
3760 		 * ONLINE. Same for STANDBY paths.
3761 		 */
3762 
3763 		if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE ||
3764 		    opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) {
3765 			if (!(MDI_PI_IS_ONLINE(pip))) {
3766 				VHCI_DEBUG(1, (CE_NOTE, NULL,
3767 				    "!vhci_update_pathstates: marking path"
3768 				    " 0x%p as ONLINE\n", (void *)pip));
3769 				cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3770 				vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3771 				    "(%s%d): path %s "
3772 				    "is now ONLINE because of "
3773 				    "an externally initiated failover",
3774 				    ddi_pathname(dip, cpath),
3775 				    ddi_driver_name(dip),
3776 				    ddi_get_instance(dip),
3777 				    mdi_pi_spathname(pip));
3778 				kmem_free(cpath, MAXPATHLEN);
3779 				mdi_pi_set_state(pip,
3780 				    MDI_PATHINFO_STATE_ONLINE);
3781 				mdi_pi_set_preferred(pip,
3782 				    opinfo.opinfo_preferred);
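     				/*
     				 * Record this path's class as the lun's
     				 * active pclass, replacing any previous
     				 * value, and clear the waiting-for-
     				 * activepath flag.
     				 */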
3783 				tptr = kmem_alloc(strlen
3784 				    (opinfo.opinfo_path_attr)+1, KM_SLEEP);
3785 				(void) strlcpy(tptr, opinfo.opinfo_path_attr,
3786 				    (strlen(opinfo.opinfo_path_attr)+1));
3787 				mutex_enter(&vlun->svl_mutex);
3788 				if (vlun->svl_active_pclass != NULL) {
3789 					kmem_free(vlun->svl_active_pclass,
3790 					    strlen(vlun->svl_active_pclass)+1);
3791 				}
3792 				vlun->svl_active_pclass = tptr;
3793 				if (vlun->svl_waiting_for_activepath) {
3794 					vlun->svl_waiting_for_activepath = 0;
3795 				}
3796 				mutex_exit(&vlun->svl_mutex);
3797 				/*
     				 * Check for a reservation conflict: issue a
     				 * polled single-block READ down this path; a
     				 * RESERVATION CONFLICT status means another
     				 * host still holds a SCSI-II reservation.
     				 */
3798 				bp = scsi_alloc_consistent_buf(
3799 				    &svp->svp_psd->sd_address,
3800 				    (struct buf *)NULL, DEV_BSIZE, B_READ,
3801 				    NULL, NULL);
3802 				if (!bp) {
3803 					VHCI_DEBUG(1, (CE_NOTE, NULL,
3804 					    "vhci_update_pathstates: "
3805 					    "!No resources (buf)\n"));
3806 					mdi_rele_path(pip);
3807 					goto done;
3808 				}
3809 				pkt = scsi_init_pkt(&svp->svp_psd->sd_address,
3810 				    NULL, bp, CDB_GROUP1,
3811 				    sizeof (struct scsi_arq_status), 0,
3812 				    PKT_CONSISTENT, NULL, NULL);
3813 				if (pkt) {
3814 					(void) scsi_setup_cdb((union scsi_cdb *)
3815 					    (uintptr_t)pkt->pkt_cdbp,
3816 					    SCMD_READ, 1, 1, 0);
3817 					pkt->pkt_time = 3*30;
3818 					pkt->pkt_flags = FLAG_NOINTR;
3819 					pkt->pkt_path_instance =
3820 					    mdi_pi_get_path_instance(pip);
3821 
3822 					if ((scsi_transport(pkt) ==
3823 					    TRAN_ACCEPT) && (pkt->pkt_reason
3824 					    == CMD_CMPLT) && (SCBP_C(pkt) ==
3825 					    STATUS_RESERVATION_CONFLICT)) {
3826 						reserve_conflict = 1;
3827 					}
3828 					scsi_destroy_pkt(pkt);
3829 				}
3830 				scsi_free_consistent_buf(bp);
3831 			} else if (MDI_PI_IS_ONLINE(pip)) {
3832 				if (strcmp(pclass, opinfo.opinfo_path_attr)
3833 				    != 0) {
3834 					mdi_pi_set_preferred(pip,
3835 					    opinfo.opinfo_preferred);
3836 					mutex_enter(&vlun->svl_mutex);
3837 					if (vlun->svl_active_pclass == NULL ||
3838 					    strcmp(opinfo.opinfo_path_attr,
3839 					    vlun->svl_active_pclass) != 0) {
3840 						mutex_exit(&vlun->svl_mutex);
3841 						tptr = kmem_alloc(strlen
3842 						    (opinfo.opinfo_path_attr)+1,
3843 						    KM_SLEEP);
3844 						(void) strlcpy(tptr,
3845 						    opinfo.opinfo_path_attr,
3846 						    (strlen
3847 						    (opinfo.opinfo_path_attr)
3848 						    +1));
3849 						mutex_enter(&vlun->svl_mutex);
3850 					} else {
3851 						/*
3852 						 * No need to update
3853 						 * svl_active_pclass
3854 						 */
3855 						tptr = NULL;
3856 						mutex_exit(&vlun->svl_mutex);
3857 					}
3858 					if (tptr) {
3859 						if (vlun->svl_active_pclass
3860 						    != NULL) {
3861 							kmem_free(vlun->
3862 							    svl_active_pclass,
3863 							    strlen(vlun->
3864 							    svl_active_pclass)
3865 							    +1);
3866 						}
3867 						vlun->svl_active_pclass = tptr;
3868 						mutex_exit(&vlun->svl_mutex);
3869 					}
3870 				}
3871 			}
3872 		} else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) &&
3873 		    !(MDI_PI_IS_STANDBY(pip))) {
3874 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3875 			    "!vhci_update_pathstates: marking path"
3876 			    " 0x%p as STANDBY\n", (void *)pip));
3877 			cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3878 			vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3879 			    "(%s%d): path %s "
3880 			    "is now STANDBY because of "
3881 			    "an externally initiated failover",
3882 			    ddi_pathname(dip, cpath),
3883 			    ddi_driver_name(dip),
3884 			    ddi_get_instance(dip),
3885 			    mdi_pi_spathname(pip));
3886 			kmem_free(cpath, MAXPATHLEN);
3887 			mdi_pi_set_state(pip,
3888 			    MDI_PATHINFO_STATE_STANDBY);
3889 			mdi_pi_set_preferred(pip,
3890 			    opinfo.opinfo_preferred);
3891 			mutex_enter(&vlun->svl_mutex);
3892 			if (vlun->svl_active_pclass != NULL) {
3893 				if (strcmp(vlun->svl_active_pclass,
3894 				    opinfo.opinfo_path_attr) == 0) {
3895 					kmem_free(vlun->
3896 					    svl_active_pclass,
3897 					    strlen(vlun->
3898 					    svl_active_pclass)+1);
3899 					vlun->svl_active_pclass = NULL;
3900 				}
3901 			}
3902 			mutex_exit(&vlun->svl_mutex);
3903 		}
3904 		(void) mdi_prop_free(pclass);
3905 		sps = mdi_select_path(dip, NULL,
3906 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3907 		    MDI_SELECT_NO_PREFERRED), pip, &npip);
3908 		mdi_rele_path(pip);
3909 
3910 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
3911 
3912 	/*
3913 	 * Check to see if this vlun has an active SCSI-II RESERVE.  If so
3914 	 * clear the reservation by sending a reset, so the host doesn't
3915 	 * receive a reservation conflict.
3916 	 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd
3917 	 * of the reset, explicitly.
3918 	 */
3919 	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
3920 		if (reserve_conflict && (vlun->svl_xlf_capable == 0)) {
3921 			(void) vhci_recovery_reset(vlun,
3922 			    &svp->svp_psd->sd_address, FALSE,
3923 			    VHCI_DEPTH_TARGET);
3924 		}
3925 		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3926 		mutex_enter(&vhci->vhci_mutex);
3927 		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
3928 		    &vhci->vhci_reset_notify_listf);
3929 		mutex_exit(&vhci->vhci_mutex);
3930 	}
3931 	if (vlun->svl_flags & VLUN_UPDATE_TPG) {
3932 		/*
3933 		 * Update the AccessState of related MP-API TPGs
3934 		 */
3935 		(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
3936 		vlun->svl_flags &= ~VLUN_UPDATE_TPG;
3937 	}
3938 done:
3939 	if (vlun->svl_efo_update_path) {
3940 		vlun->svl_efo_update_path = 0;
3941 		vhci_efo_done(vlun->svl_swarg);
3942 		vlun->svl_swarg = 0;
3943 	}
3944 	VHCI_RELEASE_LUN(vlun);
3945 }
3946 
3947 /* ARGSUSED */
3948 static int
3949 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
3950 {
3951 	scsi_hba_tran_t		*hba = NULL;
3952 	struct scsi_device	*psd = NULL;
3953 	scsi_vhci_lun_t		*vlun = NULL;
3954 	dev_info_t		*pdip = NULL;
3955 	dev_info_t		*tgt_dip;
3956 	struct scsi_vhci	*vhci;
3957 	char			*guid;
3958 	scsi_vhci_priv_t	*svp = NULL;
3959 	int			rval = MDI_FAILURE;
3960 	int			vlun_alloced = 0;
3961 
3962 	ASSERT(vdip != NULL);
3963 	ASSERT(pip != NULL);
3964 
3965 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
3966 	ASSERT(vhci != NULL);
3967 
3968 	pdip = mdi_pi_get_phci(pip);
3969 	ASSERT(pdip != NULL);
3970 
3971 	hba = ddi_get_driver_private(pdip);
3972 	ASSERT(hba != NULL);
3973 
3974 	tgt_dip = mdi_pi_get_client(pip);
3975 	ASSERT(tgt_dip != NULL);
3976 
3977 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
3978 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
3979 		VHCI_DEBUG(1, (CE_WARN, NULL,
3980 		    "vhci_pathinfo_init: lun guid property failed"));
3981 		goto failure;
3982 	}
3983 
3984 	vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced);
3985 	ddi_prop_free(guid);
3986 
3987 	vlun->svl_dip = tgt_dip;
3988 
3989 	svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
3990 	svp->svp_svl = vlun;
3991 
3992 	vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
3993 	mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL);
3994 	cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL);
3995 
3996 	psd = kmem_zalloc(sizeof (*psd), KM_SLEEP);
3997 	mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL);
3998 
3999 	if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4000 		/*
4001 		 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to
4002 		 * scsi_device in the scsi_address structure.  This allows
4003 		 * an HBA driver to find its scsi_device(9S) and
4004 		 * per-scsi_device(9S) HBA private data given a
4005 		 * scsi_address(9S) by using scsi_address_device(9F) and
4006 		 * scsi_device_hba_private_get(9F).
4007 		 */
4008 		psd->sd_address.a.a_sd = psd;
4009 	} else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4010 		/*
4011 		 * Clone the transport structure if requested.
4012 		 * Self-enumerating HBAs always need to use cloning.
4013 		 */
4014 		scsi_hba_tran_t	*clone =
4015 		    kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP);
4016 		bcopy(hba, clone, sizeof (scsi_hba_tran_t));
4017 		hba = clone;
4018 		hba->tran_sd = psd;
4019 	} else {
4020 		/*
4021 		 * SPI pHCI unit-address. If we ever need to support this
4022 		 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo
4023 		 * node unit-address properties.  For now we fail...
4024 		 */
4025 		goto failure;
4026 	}
4027 
4028 	psd->sd_dev = tgt_dip;
4029 	psd->sd_address.a_hba_tran = hba;
4030 
4031 	/*
4032 	 * Mark scsi_device as being associated with a pathinfo node. For
4033 	 * a scsi_device structure associated with a devinfo node,
4034 	 * scsi_ctlops_initchild sets this field to NULL.
4035 	 */
4036 	psd->sd_pathinfo = pip;
4037 
4038 	/*
4039 	 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with
4040 	 * too much scsi_vhci/mdi/ndi knowledge. Remove this code when all
4041 	 * mpxio-capable pHCI drivers use SCSA enumeration services (or at
4042 	 * least have been changed to use sd_pathinfo instead).
4043 	 */
4044 	psd->sd_private = (caddr_t)pip;
4045 
4046 	/* See scsi_hba.c for info on sd_tran_safe kludge */
4047 	psd->sd_tran_safe = hba;
4048 
4049 	svp->svp_psd = psd;
4050 	mdi_pi_set_vhci_private(pip, (caddr_t)svp);
4051 
4052 	/*
4053 	 * call hba's target init entry point if it exists
4054 	 */
4055 	if (hba->tran_tgt_init != NULL) {
4056 		if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip,
4057 		    hba, psd)) != DDI_SUCCESS) {
4058 			VHCI_DEBUG(1, (CE_WARN, pdip,
4059 			    "!vhci_pathinfo_init: tran_tgt_init failed for "
4060 			    "path=0x%p rval=%x", (void *)pip, rval));
4061 			goto failure;
4062 		}
4063 	}
4064 
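     	/*
     	 * Mark the path as new so that vhci_pathinfo_online() can perform
     	 * first-time cleanup (such as issuing a SCSI-2 RELEASE) before
     	 * the path goes fully online.
     	 */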
4065 	svp->svp_new_path = 1;
4066 
4067 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n",
4068 	    (void *)pip));
4069 	return (MDI_SUCCESS);
4070 
4071 failure:
4072 	if (psd) {
4073 		mutex_destroy(&psd->sd_mutex);
4074 		kmem_free(psd, sizeof (*psd));
4075 	}
4076 	if (svp) {
4077 		mdi_pi_set_vhci_private(pip, NULL);
4078 		mutex_destroy(&svp->svp_mutex);
4079 		cv_destroy(&svp->svp_cv);
4080 		kmem_free(svp, sizeof (*svp));
4081 	}
4082 	if (hba && (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE))
4083 		kmem_free(hba, sizeof (scsi_hba_tran_t));
4084 
4085 	if (vlun_alloced)
4086 		vhci_lun_free(tgt_dip);
4087 
4088 	return (rval);
4089 }
4090 
4091 /* ARGSUSED */
4092 static int
4093 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4094 {
4095 	scsi_hba_tran_t		*hba = NULL;
4096 	struct scsi_device	*psd = NULL;
4097 	dev_info_t		*pdip = NULL;
4098 	dev_info_t		*cdip = NULL;
4099 	scsi_vhci_priv_t	*svp = NULL;
4100 
4101 	ASSERT(vdip != NULL);
4102 	ASSERT(pip != NULL);
4103 
4104 	pdip = mdi_pi_get_phci(pip);
4105 	ASSERT(pdip != NULL);
4106 
4107 	cdip = mdi_pi_get_client(pip);
4108 	ASSERT(cdip != NULL);
4109 
4110 	hba = ddi_get_driver_private(pdip);
4111 	ASSERT(hba != NULL);
4112 
4113 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_UNINIT);
4114 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4115 	if (svp == NULL) {
4116 		/* path already freed. Nothing to do. */
4117 		return (MDI_SUCCESS);
4118 	}
4119 
4120 	psd = svp->svp_psd;
4121 	ASSERT(psd != NULL);
4122 
4123 	if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4124 		/* Verify plumbing */
4125 		ASSERT(psd->sd_address.a_hba_tran == hba);
4126 		ASSERT(psd->sd_address.a.a_sd == psd);
4127 	} else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4128 		/* Switch to cloned scsi_hba_tran(9S) structure */
4129 		hba = psd->sd_address.a_hba_tran;
4130 		ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE);
4131 		ASSERT(hba->tran_sd == psd);
4132 	}
4133 
4134 	if (hba->tran_tgt_free != NULL) {
4135 		(*hba->tran_tgt_free) (pdip, cdip, hba, psd);
4136 	}
4137 	mutex_destroy(&psd->sd_mutex);
4138 	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4139 		kmem_free(hba, sizeof (*hba));
4140 	}
4141 
4142 	mdi_pi_set_vhci_private(pip, NULL);
4143 
4144 	/*
4145 	 * Free the pathinfo related scsi_device inquiry data. Note that this
4146 	 * matches what happens for scsi_hba.c devinfo case at uninitchild time.
4147 	 */
4148 	if (psd->sd_inq)
4149 		kmem_free((caddr_t)psd->sd_inq, sizeof (struct scsi_inquiry));
4150 	kmem_free((caddr_t)psd, sizeof (*psd));
4151 
4152 	mutex_destroy(&svp->svp_mutex);
4153 	cv_destroy(&svp->svp_cv);
4154 	kmem_free((caddr_t)svp, sizeof (*svp));
4155 
4156 	/*
4157 	 * If this is the last path to the client,
4158 	 * then free up the vlun as well.
4159 	 */
4160 	if (mdi_client_get_path_count(cdip) == 1) {
4161 		vhci_lun_free(cdip);
4162 	}
4163 
4164 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n",
4165 	    (void *)pip));
4166 	return (MDI_SUCCESS);
4167 }
4168 
4169 /* ARGSUSED */
4170 static int
4171 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip,
4172     mdi_pathinfo_state_t state, uint32_t ext_state, int flags)
4173 {
4174 	int			rval = MDI_SUCCESS;
4175 	scsi_vhci_priv_t	*svp;
4176 	scsi_vhci_lun_t		*vlun;
4177 	int			held;
4178 	int			op = (flags & 0xf00) >> 8;
4179 	struct scsi_vhci	*vhci;
4180 
4181 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4182 
4183 	if (flags & MDI_EXT_STATE_CHANGE) {
4184 		/*
4185 		 * We do not want to issue any commands down the path if the
4186 		 * sync flag is set.  Lower layers might not be ready to accept
4187 		 * any I/O commands.
4188 		 */
4189 		if (op == DRIVER_DISABLE)
4190 			return (MDI_SUCCESS);
4191 
4192 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4193 		if (svp == NULL) {
4194 			return (MDI_FAILURE);
4195 		}
4196 		vlun = svp->svp_svl;
4197 
4198 		if (flags & MDI_BEFORE_STATE_CHANGE) {
4199 			/*
4200 			 * Hold the LUN.
4201 			 */
4202 			VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
4203 			if (flags & MDI_DISABLE_OP)  {
4204 				/*
4205 				 * Issue scsi reset if it happens to be
4206 				 * reserved path.
4207 				 */
4208 				if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4209 					/*
4210 					 * If a reservation is pending on
4211 					 * this path, don't mark the
4212 					 * path busy.
4213 					 */
4214 					if (op == DRIVER_DISABLE_TRANSIENT) {
4215 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4216 						    "!vhci_pathinfo"
4217 						    "_state_change (pip:%p): "
4218 						    " reservation: fail busy\n",
4219 						    (void *)pip));
4220 						return (MDI_FAILURE);
4221 					}
4222 					if (pip == vlun->svl_resrv_pip) {
4223 						if (vhci_recovery_reset(
4224 						    svp->svp_svl,
4225 						    &svp->svp_psd->sd_address,
4226 						    TRUE,
4227 						    VHCI_DEPTH_TARGET) == 0) {
4228 							VHCI_DEBUG(1,
4229 							    (CE_NOTE, NULL,
4230 							    "!vhci_pathinfo"
4231 							    "_state_change "
4232 							    " (pip:%p): "
4233 							    "reset failed, "
4234 							    "give up!\n",
4235 							    (void *)pip));
4236 						}
4237 						vlun->svl_flags &=
4238 						    ~VLUN_RESERVE_ACTIVE_FLG;
4239 					}
4240 				}
4241 			} else if (flags & MDI_ENABLE_OP)  {
4242 				if (((vhci->vhci_conf_flags &
4243 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4244 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4245 				    MDI_PI_IS_USER_DISABLE(pip) &&
4246 				    MDI_PI_IS_STANDBY(pip)) {
4247 					struct scsi_failover_ops	*fo;
4248 					char *best_pclass, *pclass = NULL;
4249 					int  best_class, rv;
4250 					/*
4251 					 * Failback if enabling a standby path
4252 					 * and it is the primary class or
4253 					 * preferred class
4254 					 */
4255 					best_class = mdi_pi_get_preferred(pip);
4256 					if (best_class == 0) {
4257 						/*
4258 						 * if not preferred - compare
4259 						 * path-class with class
4260 						 */
4261 						fo = vlun->svl_fops;
4262 						(void) fo->sfo_pathclass_next(
4263 						    NULL, &best_pclass,
4264 						    vlun->svl_fops_ctpriv);
4265 						pclass = NULL;
4266 						rv = mdi_prop_lookup_string(pip,
4267 						    "path-class", &pclass);
4268 						if (rv != MDI_SUCCESS ||
4269 						    pclass == NULL) {
4270 							vhci_log(CE_NOTE, vdip,
4271 							    "!path-class "
4272 							    "lookup failed. "
4273 							    "rv: %d "
4274 							    "class: %p", rv,
4275 							    (void *)pclass);
4276 						} else if (strncmp(pclass,
4277 						    best_pclass,
4278 						    strlen(best_pclass)) == 0) {
4279 							best_class = 1;
4280 						}
4281 						if (rv == MDI_SUCCESS &&
4282 						    pclass != NULL) {
4283 							rv = mdi_prop_free(
4284 							    pclass);
4285 							if (rv !=
4286 							    DDI_PROP_SUCCESS) {
4287 								vhci_log(
4288 								    CE_NOTE,
4289 								    vdip,
4290 								    "!path-"
4291 								    "class"
4292 								    " free"
4293 								    " failed"
4294 								    " rv: %d"
4295 								    " class: "
4296 								    "%p",
4297 								    rv,
4298 								    (void *)
4299 								    pclass);
4300 							}
4301 						}
4302 					}
4303 					if (best_class == 1) {
4304 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4305 						    "preferred path: %p "
4306 						    "USER_DISABLE->USER_ENABLE "
4307 						    "transition for lun %s\n",
4308 						    (void *)pip,
4309 						    vlun->svl_lun_wwn));
4310 						(void) taskq_dispatch(
4311 						    vhci->vhci_taskq,
4312 						    vhci_initiate_auto_failback,
4313 						    (void *) vlun, KM_SLEEP);
4314 					}
4315 				}
4316 				/*
4317 				 * if PGR is active, revalidate key and
4318 				 * register on this path also, if key is
4319 				 * still valid
4320 				 */
4321 				sema_p(&vlun->svl_pgr_sema);
4322 				if (vlun->svl_pgr_active)
4323 					(void)
4324 					    vhci_pgr_validate_and_register(svp);
4325 				sema_v(&vlun->svl_pgr_sema);
4326 				/*
4327 				 * Inform target driver about any
4328 				 * reservations to be reinstated if target
4329 				 * has dropped reservation during the busy
4330 				 * period.
4331 				 */
4332 				mutex_enter(&vhci->vhci_mutex);
4333 				scsi_hba_reset_notify_callback(
4334 				    &vhci->vhci_mutex,
4335 				    &vhci->vhci_reset_notify_listf);
4336 				mutex_exit(&vhci->vhci_mutex);
4337 			}
4338 		}
4339 		if (flags & MDI_AFTER_STATE_CHANGE) {
4340 			if (flags & MDI_ENABLE_OP)  {
4341 				mutex_enter(&vhci_global_mutex);
4342 				cv_broadcast(&vhci_cv);
4343 				mutex_exit(&vhci_global_mutex);
4344 			}
4345 			if (vlun->svl_setcap_done) {
4346 				(void) vhci_pHCI_cap(&svp->svp_psd->sd_address,
4347 				    "sector-size", vlun->svl_sector_size,
4348 				    1, pip);
4349 			}
4350 
4351 			/*
4352 			 * Release the LUN
4353 			 */
4354 			VHCI_RELEASE_LUN(vlun);
4355 
4356 			/*
4357 			 * Path transition is complete.
4358 			 * Run callback to indicate target driver to
4359 			 * retry to prevent IO starvation.
4360 			 */
4361 			if (scsi_callback_id != 0) {
4362 				ddi_run_callback(&scsi_callback_id);
4363 			}
4364 		}
4365 	} else {
4366 		switch (state) {
4367 		case MDI_PATHINFO_STATE_ONLINE:
4368 			rval = vhci_pathinfo_online(vdip, pip, flags);
4369 			break;
4370 
4371 		case MDI_PATHINFO_STATE_OFFLINE:
4372 			rval = vhci_pathinfo_offline(vdip, pip, flags);
4373 			break;
4374 
4375 		default:
4376 			break;
4377 		}
4378 		/*
4379 		 * Path transition is complete.
4380 		 * Run callback to indicate target driver to
4381 		 * retry to prevent IO starvation.
4382 		 */
4383 		if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) {
4384 			ddi_run_callback(&scsi_callback_id);
4385 		}
4386 		return (rval);
4387 	}
4388 
4389 	return (MDI_SUCCESS);
4390 }
4391 
4392 /*
4393  * Parse the mpxio load balancing options. The datanameptr
4394  * will point to a string containing the load-balance-options value.
4395  * The load-balance-options value will be a property that
4396  * defines the load-balance algorithm and any arguments to that
4397  * algorithm.
4398  * For example:
4399  * device-type-mpxio-options-list=
4400  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4401  * "device-type=SUN     SE6920", "round-robin-options";
4402  * logical-block-options="load-balance=logical-block", "region-size=15";
4403  * round-robin-options="load-balance=round-robin";
4404  *
4405  * If load-balance is not defined, the load-balance algorithm will
4406  * default to the global setting.  Default values are assigned to the
4407  * arguments (region-size=18), and any argument that is not
4408  * known is ignored.
4409  */
4410 static void
4411 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip,
4412     caddr_t datanameptr)
4413 {
4414 	char			*dataptr, *next_entry;
4415 	caddr_t			config_list	= NULL;
4416 	int			config_list_len = 0, list_len = 0;
4417 	int			region_size = -1;
4418 	client_lb_t		load_balance;
4419 
4420 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr,
4421 	    (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) {
4422 		return;
4423 	}
4424 
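     	/*
     	 * The property value is a list of NUL-separated strings
     	 * (config_list_len bytes in total); advance through it one
     	 * string at a time using strlen() + 1.
     	 */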
4425 	list_len = config_list_len;
4426 	next_entry = config_list;
4427 	while (config_list_len > 0) {
4428 		dataptr = next_entry;
4429 
4430 		if (strncmp(mdi_load_balance, dataptr,
4431 		    strlen(mdi_load_balance)) == 0) {
4432 			/* get the load-balance scheme */
4433 			dataptr += strlen(mdi_load_balance) + 1;
4434 			if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) {
4435 				(void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
4436 				load_balance = LOAD_BALANCE_RR;
4437 			} else if (strcmp(dataptr,
4438 			    LOAD_BALANCE_PROP_LBA) == 0) {
4439 				(void) mdi_set_lb_policy(cdip,
4440 				    LOAD_BALANCE_LBA);
4441 				load_balance = LOAD_BALANCE_LBA;
4442 			} else if (strcmp(dataptr,
4443 			    LOAD_BALANCE_PROP_NONE) == 0) {
4444 				(void) mdi_set_lb_policy(cdip,
4445 				    LOAD_BALANCE_NONE);
4446 				load_balance = LOAD_BALANCE_NONE;
4447 			}
4448 		} else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE,
4449 		    strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
4450 			int	i = 0;
4451 			char	*ptr;
4452 			char	*tmp;
4453 
4454 			tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1);
4455 			/* check for numeric value */
4456 			for (ptr = tmp; i < strlen(tmp); i++, ptr++) {
4457 				if (!isdigit(*ptr)) {
4458 					cmn_err(CE_WARN,
4459 					    "Illegal region size: %s."
4460 					    " Setting to default value: %d",
4461 					    tmp,
4462 					    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4463 					region_size =
4464 					    LOAD_BALANCE_DEFAULT_REGION_SIZE;
4465 					break;
4466 				}
4467 			}
4468 			if (i >= strlen(tmp)) {
4469 				region_size = stoi(&tmp);
4470 			}
4471 			(void) mdi_set_lb_region_size(cdip, region_size);
4472 		}
4473 		config_list_len -= (strlen(next_entry) + 1);
4474 		next_entry += strlen(next_entry) + 1;
4475 	}
4476 #ifdef DEBUG
4477 	if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) {
4478 		VHCI_DEBUG(1, (CE_NOTE, dip,
4479 		    "!vhci_parse_mpxio_lb_options: region-size: %d"
4480 		    "only valid for load-balance=logical-block\n",
4481 		    region_size));
4482 	}
4483 #endif
4484 	if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) {
4485 		VHCI_DEBUG(1, (CE_NOTE, dip,
4486 		    "!vhci_parse_mpxio_lb_options: No region-size"
4487 		    " defined load-balance=logical-block."
4488 		    " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
4489 		(void) mdi_set_lb_region_size(cdip,
4490 		    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4491 	}
4492 	if (list_len > 0) {
4493 		kmem_free(config_list, list_len);
4494 	}
4495 }
4496 
4497 /*
4498  * Parse the device-type-mpxio-options-list looking for the key of
4499  * "load-balance-options". If found, parse the load balancing options.
4500  * See the comment above vhci_get_device_type_mpxio_options() for the
4501  * format of the device-type-mpxio-options-list.
4502  */
4503 static void
4504 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4505     caddr_t datanameptr, int list_len)
4506 {
4507 	char		*dataptr;
4508 	int		len;
4509 
4510 	/*
4511 	 * get the data list
4512 	 */
4513 	dataptr = datanameptr;
4514 	len = 0;
4515 	while (len < list_len &&
4516 	    strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
4517 	    != 0) {
4518 		if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
4519 		    strlen(LOAD_BALANCE_OPTIONS)) == 0) {
4520 			len += strlen(LOAD_BALANCE_OPTIONS) + 1;
4521 			dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
4522 			vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
4523 		}
4524 		len += strlen(dataptr) + 1;
4525 		dataptr += strlen(dataptr) + 1;
4526 	}
4527 }
4528 
4529 /*
4530  * Compare the inquiry string returned from the device with the
4531  * device-type entries.  If the device-type-mpxio-options-list exists,
4532  * parse it, checking for a match between the device-type value and
4533  * the inquiry string returned from the device.  If a match is
4534  * found, parse the mpxio options list.  The format of the
4535  * device-type-mpxio-options-list is:
4536  * device-type-mpxio-options-list=
4537  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4538  * "device-type=SUN     SE6920", "round-robin-options";
4539  * logical-block-options="load-balance=logical-block", "region-size=15";
4540  * round-robin-options="load-balance=round-robin";
4541  */
4542 void
4543 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4544     struct scsi_device *devp)
4545 {
4546 
4547 	caddr_t			config_list	= NULL;
4548 	caddr_t			vidptr, datanameptr;
4549 	int			vidlen, dupletlen = 0;
4550 	int			config_list_len = 0, len;
4551 	struct scsi_inquiry	*inq = devp->sd_inq;
4552 
4553 	/*
4554 	 * Look up the device-type-mpxio-options-list and walk through
4555 	 * the list, comparing the vendor id from the earlier inquiry
4556 	 * command with the vids in the list; if there is a match, look
4557 	 * up the mpxio-options value.
4558 	 */
4559 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
4560 	    MPXIO_OPTIONS_LIST,
4561 	    (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {
4562 
4563 		/*
4564 		 * Compare vids in each duplet - if it matches,
4565 		 * parse the mpxio options list.
4566 		 */
4567 		for (len = config_list_len, vidptr = config_list; len > 0;
4568 		    len -= dupletlen) {
4569 
4570 			dupletlen = 0;
4571 
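     			/*
     			 * A duplet is a "device-type=<vid>" string followed
     			 * by its NUL-separated option strings.
     			 */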
4572 			if (strlen(vidptr) != 0 &&
4573 			    strncmp(vidptr, DEVICE_TYPE_STR,
4574 			    strlen(DEVICE_TYPE_STR)) == 0) {
4575 				/* point to next duplet */
4576 				datanameptr = vidptr + strlen(vidptr) + 1;
4577 				/* add len of this duplet */
4578 				dupletlen += strlen(vidptr) + 1;
4579 				/* get to device type */
4580 				vidptr += strlen(DEVICE_TYPE_STR) + 1;
4581 				vidlen = strlen(vidptr);
4582 				if ((vidlen != 0) &&
4583 				    bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
4584 					vhci_parse_mpxio_options(dip, cdip,
4585 					    datanameptr, len - dupletlen);
4586 					break;
4587 				}
4588 				/* get to next duplet */
4589 				vidptr += strlen(vidptr) + 1;
4590 			}
4591 			/* get to the next device-type */
4592 			while (len - dupletlen > 0 &&
4593 			    strlen(vidptr) != 0 &&
4594 			    strncmp(vidptr, DEVICE_TYPE_STR,
4595 			    strlen(DEVICE_TYPE_STR)) != 0) {
4596 				dupletlen += strlen(vidptr) + 1;
4597 				vidptr += strlen(vidptr) + 1;
4598 			}
4599 		}
4600 		if (config_list_len > 0) {
4601 			kmem_free(config_list, config_list_len);
4602 		}
4603 	}
4604 }
4605 
4606 static int
4607 vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip,
4608     struct scsi_failover_ops *fo,
4609     scsi_vhci_lun_t *vlun,
4610     struct scsi_vhci *vhci)
4611 {
4612 	struct scsi_path_opinfo		opinfo;
4613 	char				*pclass, *best_pclass;
4614 
4615 	if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) {
4616 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
4617 		    "Failed to get operation info for path:%p\n", (void *)pip));
4618 		return (MDI_FAILURE);
4619 	}
4620 	/* set the xlf capable flag in the vlun for future use */
4621 	vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
4622 	(void) mdi_prop_update_string(pip, "path-class",
4623 	    opinfo.opinfo_path_attr);
4624 
4625 	pclass = opinfo.opinfo_path_attr;
4626 	if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
4627 		mutex_enter(&vlun->svl_mutex);
4628 		if (vlun->svl_active_pclass != NULL) {
4629 			if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
4630 				mutex_exit(&vlun->svl_mutex);
4631 				/*
4632 				 * Externally initiated failover has happened;
4633 				 * force the path state to be STANDBY/ONLINE,
4634 				 * next IO will trigger failover and thus
4635 				 * sync-up the pathstates.  Reason we don't
4636 				 * sync-up immediately by invoking
4637 				 * vhci_update_pathstates() is because it
4638 				 * needs a VHCI_HOLD_LUN() and we don't
4639 				 * want to block here.
4640 				 *
4641 				 * Further, if the device is an ALUA device,
4642 				 * then failure to exactly match 'pclass' and
4643 				 * 'svl_active_pclass'(as is the case here)
4644 				 * indicates that the currently active path
4645 				 * is a 'non-optimized' path - which means
4646 				 * that 'svl_active_pclass' needs to be
4647 				 * replaced with opinfo.opinfo_path_state
4648 				 * replaced with the opinfo.opinfo_path_attr
4649 				 */
4650 
4651 				if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
4652 					char	*tptr;
4653 
4654 					/*
4655 					 * The device is ALUA compliant. The
4656 					 * state needs to be changed to ONLINE
4657 					 * rather than STANDBY, as is typically
4658 					 * done for an asymmetric device that
4659 					 * is not ALUA compliant.
4660 					 */
4661 					mdi_pi_set_state(pip,
4662 					    MDI_PATHINFO_STATE_ONLINE);
4663 					tptr = kmem_alloc(strlen
4664 					    (opinfo.opinfo_path_attr)+1,
4665 					    KM_SLEEP);
4666 					(void) strlcpy(tptr,
4667 					    opinfo.opinfo_path_attr,
4668 					    (strlen(opinfo.opinfo_path_attr)
4669 					    +1));
4670 					mutex_enter(&vlun->svl_mutex);
4671 					kmem_free(vlun->svl_active_pclass,
4672 					    strlen(vlun->svl_active_pclass)+1);
4673 					vlun->svl_active_pclass = tptr;
4674 					mutex_exit(&vlun->svl_mutex);
4675 				} else {
4676 					/*
4677 					 * Non ALUA device case.
4678 					 */
4679 					mdi_pi_set_state(pip,
4680 					    MDI_PATHINFO_STATE_STANDBY);
4681 				}
4682 				vlun->svl_fo_support = opinfo.opinfo_mode;
4683 				mdi_pi_set_preferred(pip,
4684 				    opinfo.opinfo_preferred);
4685 				return (MDI_SUCCESS);
4686 			}
4687 		} else {
4688 			char	*tptr;
4689 
4690 			/*
4691 			 * Release the mutex before we try to
4692 			 * allocate, since the allocation may
4693 			 * sleep.
4694 			 */
4695 			mutex_exit(&vlun->svl_mutex);
4696 			tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4697 			(void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4698 			mutex_enter(&vlun->svl_mutex);
4699 			vlun->svl_active_pclass = tptr;
4700 		}
4701 		mutex_exit(&vlun->svl_mutex);
4702 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4703 		vlun->svl_waiting_for_activepath = 0;
4704 	} else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
4705 		mutex_enter(&vlun->svl_mutex);
4706 		if (vlun->svl_active_pclass == NULL) {
4707 			char	*tptr;
4708 
4709 			mutex_exit(&vlun->svl_mutex);
4710 			tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4711 			(void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4712 			mutex_enter(&vlun->svl_mutex);
4713 			vlun->svl_active_pclass = tptr;
4714 		}
4715 		mutex_exit(&vlun->svl_mutex);
4716 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4717 		vlun->svl_waiting_for_activepath = 0;
4718 	} else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
4719 		mutex_enter(&vlun->svl_mutex);
4720 		if (vlun->svl_active_pclass != NULL) {
4721 			if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
4722 				mutex_exit(&vlun->svl_mutex);
4723 				/*
4724 				 * externally initiated failover has happened;
4725 				 * force state to ONLINE (see comment above)
4726 				 */
4727 				mdi_pi_set_state(pip,
4728 				    MDI_PATHINFO_STATE_ONLINE);
4729 				vlun->svl_fo_support = opinfo.opinfo_mode;
4730 				mdi_pi_set_preferred(pip,
4731 				    opinfo.opinfo_preferred);
4732 				return (MDI_SUCCESS);
4733 			}
4734 		}
4735 		mutex_exit(&vlun->svl_mutex);
4736 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);
4737 
4738 		/*
4739 		 * Initiate auto-failback, if enabled, for path if path-state
4740 		 * is transitioning from OFFLINE->STANDBY and pathclass is the
4741 		 * preferred pathclass for this storage.
4742 		 * NOTE: In case where opinfo_path_state is SCSI_PATH_ACTIVE
4743 		 * NOTE: In the case where opinfo_path_state is SCSI_PATH_ACTIVE
4744 		 * (above), where the pi state is set to STANDBY, we don't
4745 		 * initiate auto-failback as the next IO shall take care of
4746 		 * this.  See comment above.
4747 		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
4748 		    vlun->svl_fops_ctpriv);
4749 		if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4750 		    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4751 		    (strcmp(pclass, best_pclass) == 0) &&
4752 		    ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)||
4753 		    (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
4754 			VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
4755 			    " OFFLINE->STANDBY transition for lun %s\n",
4756 			    best_pclass, (void *)pip, vlun->svl_lun_wwn));
4757 			(void) taskq_dispatch(vhci->vhci_taskq,
4758 			    vhci_initiate_auto_failback, (void *) vlun,
4759 			    KM_SLEEP);
4760 		}
4761 	}
4762 	vlun->svl_fo_support = opinfo.opinfo_mode;
4763 	mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);
4764 
4765 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
4766 	    " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
4767 	    opinfo.opinfo_rev, opinfo.opinfo_path_state,
4768 	    opinfo.opinfo_preferred, opinfo.opinfo_mode));
4769 
4770 	return (MDI_SUCCESS);
4771 }
4772 
4773 /*
4774  * Form the kstat name and call mdi_pi_kstat_create()
4775  */
4776 void
4777 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
4778 {
4779 	dev_info_t	*tgt_dip;
4780 	dev_info_t	*pdip;
4781 	char		*guid;
4782 	char		*target_port, *target_port_dup;
4783 	char		ks_name[KSTAT_STRLEN];
4784 	uint_t		pid;
4785 	int		by_id;
4786 	mod_hash_val_t	hv;
4787 
4788 
4789 	/* return if we have already allocated kstats */
4790 	if (mdi_pi_kstat_exists(pip))
4791 		return;
4792 
4793 	/*
4794 	 * We need instance numbers to create a kstat name; return if we don't
4795 	 * have instance numbers assigned yet.
4796 	 */
4797 	tgt_dip = mdi_pi_get_client(pip);
4798 	pdip = mdi_pi_get_phci(pip);
4799 	if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
4800 		return;
4801 
4802 	/*
4803 	 * A path oriented kstat has a ks_name of the form:
4804 	 *
4805 	 * <client-driver><instance>.t<pid>.<pHCI-driver><instance>
4806 	 *
4807 	 * We maintain a bidirectional 'target-port' to <pid> map,
4808 	 * called targetmap. All pathinfo nodes with the same
4809 	 * 'target-port' map to the same <pid>. The iostat(1M) code,
4810 	 * when parsing a path oriented kstat name, uses the <pid> as
4811 	 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
4812 	 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
4813 	 * this ioctl needs to translate a <pid> to a 'target-port'
4814 	 * even after all pathinfo nodes associated with the
4815 	 * 'target-port' have been destroyed. This is needed to support
4816 	 * consistent first-iteration activity-since-boot iostat(1M)
4817 	 * output. Because of this requirement, the mapping can't be
4818 	 * based on pathinfo information in a devinfo snapshot.
4819 	 */
4820 
4821 	/* determine 'target-port' */
4822 	if (mdi_prop_lookup_string(pip,
4823 	    SCSI_ADDR_PROP_TARGET_PORT, &target_port) == MDI_SUCCESS) {
4824 		target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
4825 		(void) mdi_prop_free(target_port);
4826 		by_id = 1;
4827 	} else {
4828 		/*
4829 		 * If the pHCI did not set up 'target-port' on this
4830 		 * pathinfo node, assume that our client is the only
4831 		 * one with paths to the device by using the guid
4832 		 * value as the 'target-port'. Since no other client
4833 		 * will have the same guid, no other client will use
4834 		 * the same <pid>.  NOTE: a client with an instance
4835 		 * number always has a guid.
4836 		 */
4837 		(void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
4838 		    PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid);
4839 		target_port_dup = i_ddi_strdup(guid, KM_SLEEP);
4840 		ddi_prop_free(guid);
4841 
4842 		/*
4843 		 * For this type of mapping we don't want the
4844 		 * <id> -> 'target-port' mapping to be made.  This
4845 		 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
4846 		 * to fail, and the iostat(1M) long '-n' output will
4847 		 * still use the <pid>.  We do this because we just
4848 		 * made up the 'target-port' using the guid, and we
4849 		 * don't want to expose that fact in iostat output.
4850 		 */
4851 		by_id = 0;
4852 	}
4853 
4854 	/* find/establish <pid> given 'target-port' */
4855 	mutex_enter(&vhci_targetmap_mutex);
4856 	if (mod_hash_find(vhci_targetmap_byport,
4857 	    (mod_hash_key_t)target_port_dup, &hv) == 0) {
4858 		pid = (int)(intptr_t)hv;	/* mapping exists */
4859 	} else {
4860 		pid = vhci_targetmap_pid++;	/* new mapping */
4861 
4862 		(void) mod_hash_insert(vhci_targetmap_byport,
4863 		    (mod_hash_key_t)target_port_dup,
4864 		    (mod_hash_val_t)(intptr_t)pid);
4865 		if (by_id) {
4866 			(void) mod_hash_insert(vhci_targetmap_bypid,
4867 			    (mod_hash_key_t)(uintptr_t)pid,
4868 			    (mod_hash_val_t)(uintptr_t)target_port_dup);
4869 		}
4870 		target_port_dup = NULL;		/* owned by hash */
4871 	}
4872 	mutex_exit(&vhci_targetmap_mutex);
4873 
4874 	/* form kstat name */
4875 	(void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
4876 	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip),
4877 	    pid, ddi_driver_name(pdip), ddi_get_instance(pdip));
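     	/*
     	 * For example (with hypothetical instance numbers): client "ssd"
     	 * instance 1 and pHCI "fp" instance 0, with <pid> 2, would yield
     	 * the ks_name "ssd1.t2.fp0".
     	 */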
4878 
4879 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p "
4880 	    "kstat %s: pid %x <-> port %s\n", (void *)pip,
4881 	    ks_name, pid, target_port_dup));
4882 	if (target_port_dup)
4883 		kmem_free(target_port_dup, strlen(target_port_dup) + 1);
4884 
4885 	/* call mdi to create kstats with the name we built */
4886 	(void) mdi_pi_kstat_create(pip, ks_name);
4887 }
4888 
4889 /* ARGSUSED */
4890 static int
4891 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4892 {
4893 	scsi_hba_tran_t			*hba = NULL;
4894 	struct scsi_device		*psd = NULL;
4895 	scsi_vhci_lun_t			*vlun = NULL;
4896 	dev_info_t			*pdip = NULL;
4897 	dev_info_t			*cdip;
4898 	dev_info_t			*tgt_dip;
4899 	struct scsi_vhci		*vhci;
4900 	char				*guid;
4901 	struct scsi_failover_ops	*sfo;
4902 	scsi_vhci_priv_t		*svp = NULL;
4903 	struct scsi_address		*ap;
4904 	struct scsi_pkt			*pkt;
4905 	int				rval = MDI_FAILURE;
4906 	mpapi_item_list_t		*list_ptr;
4907 	mpapi_lu_data_t			*ld;
4908 
4909 	ASSERT(vdip != NULL);
4910 	ASSERT(pip != NULL);
4911 
4912 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4913 	ASSERT(vhci != NULL);
4914 
4915 	pdip = mdi_pi_get_phci(pip);
4916 	hba = ddi_get_driver_private(pdip);
4917 	ASSERT(hba != NULL);
4918 
4919 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4920 	ASSERT(svp != NULL);
4921 
4922 	cdip = mdi_pi_get_client(pip);
4923 	ASSERT(cdip != NULL);
4924 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
4925 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
4926 		VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid "
4927 		    "property failed"));
4928 		goto failure;
4929 	}
4930 
4931 	vlun = vhci_lun_lookup(cdip);
4932 	ASSERT(vlun != NULL);
4933 
4934 	ddi_prop_free(guid);
4935 
4936 	vlun->svl_dip = mdi_pi_get_client(pip);
4937 	ASSERT(vlun->svl_dip != NULL);
4938 
4939 	psd = svp->svp_psd;
4940 	ASSERT(psd != NULL);
4941 
4942 	/*
4943 	 * Get inquiry data into pathinfo related scsi_device structure.
4944 	 * Free sd_inq when pathinfo related scsi_device structure is destroyed
4945 	 * by vhci_pathinfo_uninit(). In other words, vhci maintains its own
4946 	 * copy of scsi_device and scsi_inquiry data on a per-path basis.
4947 	 */
4948 	if (scsi_probe(psd, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
4949 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
4950 		    "scsi_probe failed path:%p rval:%x\n", (void *)pip, rval));
4951 		rval = MDI_FAILURE;
4952 		goto failure;
4953 	}
4954 
4955 	/*
4956 	 * See if we have a failover module to support the device.
4957 	 *
4958 	 * We re-probe to determine the failover ops for each path. This
4959 	 * is done in case there are any path-specific side-effects associated
4960 	 * with the sfo_device_probe implementation.
4961 	 *
4962 	 * Give the first successfull sfo_device_probe the opportunity to
4963 	 * Give the first successful sfo_device_probe the opportunity to
4964 	 * then be passed into the failover module on all other sfo_device_*()
4965 	 * operations (and must be freed by sfo_device_unprobe implementation).
4966 	 *
4967 	 * NOTE: While sfo_device_probe is done once per path,
4968 	 * sfo_device_unprobe only occurs once - when the vlun is destroyed.
4969 	 *
4970 	 * NOTE: We don't currently support per-path fops private data
4971 	 * mechanism.
4972 	 */
4973 	sfo = vhci_dev_fo(vdip, psd,
4974 	    &vlun->svl_fops_ctpriv, &vlun->svl_fops_name);
4975 
4976 	/* check path configuration result with current vlun state */
4977 	if (((sfo && vlun->svl_fops) && (sfo != vlun->svl_fops)) ||
4978 	    (sfo && vlun->svl_not_supported) ||
4979 	    ((sfo == NULL) && vlun->svl_fops)) {
4980 		/* Getting different results for different paths. */
4981 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
4982 		    "!vhci_pathinfo_online: dev (path 0x%p) contradiction\n",
4983 		    (void *)pip));
4984 		cmn_err(CE_WARN, "scsi_vhci: failover contradiction: "
4985 		    "'%s'.vs.'%s': path %s\n",
4986 		    vlun->svl_fops ? vlun->svl_fops->sfo_name : "NULL",
4987 		    sfo ? sfo->sfo_name : "NULL", mdi_pi_pathname(pip));
4988 		vlun->svl_not_supported = 1;
4989 		rval = MDI_NOT_SUPPORTED;
4990 		goto done;
4991 	} else if (sfo == NULL) {
4992 		/* No failover module - device not supported under vHCI.  */
4993 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
4994 		    "!vhci_pathinfo_online: dev (path 0x%p) not "
4995 		    "supported\n", (void *)pip));
4996 
4997 		/* XXX does this contradict vhci_is_dev_supported ? */
4998 		vlun->svl_not_supported = 1;
4999 		rval = MDI_NOT_SUPPORTED;
5000 		goto done;
5001 	}
5002 
5003 	/* failover supported for device - save failover_ops in vlun */
5004 	vlun->svl_fops = sfo;
5005 	ASSERT(vlun->svl_fops_name != NULL);
5006 
5007 	/*
5008 	 * Obtain the device-type based mpxio options as specified in
5009 	 * scsi_vhci.conf file.
5010 	 *
5011 	 * NOTE: currently, the end result is a call to
5012 	 * mdi_set_lb_region_size().
5013 	 */
5014 	tgt_dip = psd->sd_dev;
5015 	ASSERT(tgt_dip != NULL);
5016 	vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);
5017 
5018 	/*
5019 	 * The device probe or options in conf file may have set/changed the
5020 	 * lb policy, save the current value.
5021 	 */
5022 	vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
5023 
5024 	/*
5025 	 * if PGR is active, revalidate key and register on this path also,
5026 	 * if key is still valid
5027 	 */
5028 	sema_p(&vlun->svl_pgr_sema);
5029 	if (vlun->svl_pgr_active) {
5030 		rval = vhci_pgr_validate_and_register(svp);
5031 		if (rval != 1) {
5032 			rval = MDI_FAILURE;
5033 			sema_v(&vlun->svl_pgr_sema);
5034 			goto failure;
5035 		}
5036 	}
5037 	sema_v(&vlun->svl_pgr_sema);
5038 
5039 	if (svp->svp_new_path) {
5040 		/*
5041 		 * Last chance to perform any cleanup operations on this
5042 		 * new path before making this path completely online.
5043 		 */
5044 		svp->svp_new_path = 0;
5045 
5046 		/*
5047 		 * If scsi_vhci knows the lun is already RESERVE'd,
5048 		 * then skip issuing RELEASE on the new path.
5049 		 */
5050 		if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) {
5051 			/*
5052 			 * Issue SCSI-2 RELEASE only for the first time on
5053 			 * a new path just in case the host rebooted and
5054 			 * a reservation is still pending on this path.
5055 			 * IBM Shark storage does not clear RESERVE upon
5056 			 * host reboot.
5057 			 */
5058 			ap = &psd->sd_address;
5059 			pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0,
5060 			    sizeof (struct scsi_arq_status), 0, 0,
5061 			    SLEEP_FUNC, NULL);
5062 			if (pkt == NULL) {
5063 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5064 				    "!vhci_pathinfo_online: "
5065 				    "Release init_pkt failed :%p\n",
5066 				    (void *)pip));
5067 				rval = MDI_FAILURE;
5068 				goto failure;
5069 			}
5070 			pkt->pkt_cdbp[0] = SCMD_RELEASE;
5071 			pkt->pkt_time = 60;
5072 
5073 			VHCI_DEBUG(1, (CE_NOTE, NULL,
5074 			    "!vhci_path_online: path:%p "
5075 			    "Issued SCSI-2 RELEASE\n", (void *)pip));
5076 
5077 			/* Ignore the return value */
5078 			(void) vhci_do_scsi_cmd(pkt);
5079 			scsi_destroy_pkt(pkt);
5080 		}
5081 	}
5082 
5083 	rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci);
5084 	if (rval == MDI_FAILURE) {
5085 		goto failure;
5086 	}
5087 
5088 	/* Initialize MP-API data */
5089 	vhci_update_mpapi_data(vhci, vlun, pip);
5090 
5091 	/*
5092 	 * MP-API also needs the Inquiry data to be maintained in the
5093 	 * mp_vendor_prop_t structure, so find the lun and update its
5094 	 * structure with this data.
5095 	 */
5096 	list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL,
5097 	    MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
5098 	ld = (mpapi_lu_data_t *)list_ptr->item->idata;
5099 	if (ld != NULL) {
5100 		bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8);
5101 		bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16);
5102 		bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4);
5103 	} else {
5104 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: "
5105 		    "mpapi_lu_data_t is NULL"));
5106 	}
5107 
5108 	/* create kstats for path */
5109 	vhci_kstat_create_pathinfo(pip);
5110 
5111 done:
5112 	mutex_enter(&vhci_global_mutex);
5113 	cv_broadcast(&vhci_cv);
5114 	mutex_exit(&vhci_global_mutex);
5115 
5116 	if (vlun->svl_setcap_done) {
5117 		(void) vhci_pHCI_cap(ap, "sector-size",
5118 		    vlun->svl_sector_size, 1, pip);
5119 	}
5120 
5121 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n",
5122 	    (void *)pip));
5123 
5124 failure:
5125 	return (rval);
5126 }
5127 
5128 /*
 * Path offline handler.  Release all bindings that will not be
 * released by the normal packet transport/completion code path.
 * Since we don't (presently) keep any bindings alive outside of
 * the in-transport packets (which will be released on completion),
 * there is not much to do here.
5134  */
5135 /* ARGSUSED */
5136 static int
5137 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5138 {
5139 	scsi_hba_tran_t		*hba = NULL;
5140 	struct scsi_device	*psd = NULL;
5141 	dev_info_t		*pdip = NULL;
5142 	dev_info_t		*cdip = NULL;
5143 	scsi_vhci_priv_t	*svp = NULL;
5144 
5145 	ASSERT(vdip != NULL);
5146 	ASSERT(pip != NULL);
5147 
5148 	pdip = mdi_pi_get_phci(pip);
5149 	ASSERT(pdip != NULL);
5150 	if (pdip == NULL) {
5151 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5152 		    "phci dip", (void *)pip));
5153 		return (MDI_FAILURE);
5154 	}
5155 
5156 	cdip = mdi_pi_get_client(pip);
5157 	ASSERT(cdip != NULL);
5158 	if (cdip == NULL) {
5159 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5160 		    "client dip", (void *)pip));
5161 		return (MDI_FAILURE);
5162 	}
5163 
5164 	hba = ddi_get_driver_private(pdip);
5165 	ASSERT(hba != NULL);
5166 
5167 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5168 	if (svp == NULL) {
5169 		/*
		 * An mdi_pathinfo node in the INIT state can have its vHCI
		 * private information set to NULL.
5172 		 */
5173 		VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5174 		    "svp is NULL for pip 0x%p\n", (void *)pip));
5175 		return (MDI_SUCCESS);
5176 	}
5177 
5178 	psd = svp->svp_psd;
5179 	ASSERT(psd != NULL);
5180 
5181 	mutex_enter(&svp->svp_mutex);
5182 
5183 	VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5184 	    "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
5185 	while (svp->svp_cmds != 0) {
5186 		if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
5187 		    drv_usectohz(vhci_path_quiesce_timeout * 1000000),
5188 		    TR_CLOCK_TICK) == -1) {
5189 			/*
			 * The timeout expired without the condition
			 * being signaled.
5192 			 */
5193 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5194 			    "Timeout reached on path 0x%p without the cond\n",
5195 			    (void *)pip));
5196 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5197 			    "%d cmds still pending on path: 0x%p\n",
5198 			    svp->svp_cmds, (void *)pip));
5199 			break;
5200 		}
5201 	}
5202 	mutex_exit(&svp->svp_mutex);
5203 
5204 	/*
5205 	 * Check to see if this vlun has an active SCSI-II RESERVE. And this
5206 	 * is the pip for the path that has been reserved.
5207 	 * If so clear the reservation by sending a reset, so the host will not
5208 	 * get a reservation conflict.  Reset the flag VLUN_RESERVE_ACTIVE_FLG
5209 	 * for this lun.  Also a reset notify is sent to the target driver
5210 	 * just in case the POR check condition is cleared by some other layer
5211 	 * in the stack.
5212 	 */
5213 	if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
5214 		if (pip == svp->svp_svl->svl_resrv_pip) {
5215 			if (vhci_recovery_reset(svp->svp_svl,
5216 			    &svp->svp_psd->sd_address, TRUE,
5217 			    VHCI_DEPTH_TARGET) == 0) {
5218 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5219 				    "!vhci_pathinfo_offline (pip:%p):"
5220 				    "reset failed, retrying\n", (void *)pip));
5221 				delay(1*drv_usectohz(1000000));
5222 				if (vhci_recovery_reset(svp->svp_svl,
5223 				    &svp->svp_psd->sd_address, TRUE,
5224 				    VHCI_DEPTH_TARGET) == 0) {
5225 					VHCI_DEBUG(1, (CE_NOTE, NULL,
5226 					    "!vhci_pathinfo_offline "
5227 					    "(pip:%p): reset failed, "
5228 					    "giving up!\n", (void *)pip));
5229 				}
5230 			}
5231 			svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
5232 		}
5233 	}
5234 
5235 	mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE);
5236 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
5237 
5238 	VHCI_DEBUG(1, (CE_NOTE, NULL,
5239 	    "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
5240 	return (MDI_SUCCESS);
5241 }
5242 
5243 
5244 /*
5245  * routine for SCSI VHCI IOCTL implementation.
5246  */
5247 /* ARGSUSED */
5248 static int
5249 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
5250 {
5251 	struct scsi_vhci		*vhci;
5252 	dev_info_t			*vdip;
5253 	mdi_pathinfo_t			*pip;
5254 	int				instance, held;
5255 	int				retval = 0;
5256 	caddr_t				phci_path = NULL, client_path = NULL;
5257 	caddr_t				paddr = NULL;
5258 	sv_iocdata_t			ioc;
5259 	sv_iocdata_t			*pioc = &ioc;
5260 	sv_switch_to_cntlr_iocdata_t	iocsc;
5261 	sv_switch_to_cntlr_iocdata_t	*piocsc = &iocsc;
5262 	caddr_t				s;
5263 	scsi_vhci_lun_t			*vlun;
5264 	struct scsi_failover_ops	*fo;
5265 	char				*pclass;
5266 
5267 	/* Check for validity of vhci structure */
5268 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
5269 	if (vhci == NULL) {
5270 		return (ENXIO);
5271 	}
5272 
5273 	mutex_enter(&vhci->vhci_mutex);
5274 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5275 		mutex_exit(&vhci->vhci_mutex);
5276 		return (ENXIO);
5277 	}
5278 	mutex_exit(&vhci->vhci_mutex);
5279 
5280 	/* Get the vhci dip */
5281 	vdip = vhci->vhci_dip;
5282 	ASSERT(vdip != NULL);
5283 	instance = ddi_get_instance(vdip);
5284 
5285 	/* Allocate memory for getting parameters from userland */
5286 	phci_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5287 	client_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5288 	paddr		= kmem_zalloc(MAXNAMELEN, KM_SLEEP);
5289 
5290 	/*
5291 	 * Set a local variable indicating the ioctl name. Used for
5292 	 * printing debug strings.
5293 	 */
5294 	switch (cmd) {
5295 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5296 		s = "GET_CLIENT_MULTIPATH_INFO";
5297 		break;
5298 
5299 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5300 		s = "GET_PHCI_MULTIPATH_INFO";
5301 		break;
5302 
5303 	case SCSI_VHCI_GET_CLIENT_NAME:
5304 		s = "GET_CLIENT_NAME";
5305 		break;
5306 
5307 	case SCSI_VHCI_PATH_ONLINE:
5308 		s = "PATH_ONLINE";
5309 		break;
5310 
5311 	case SCSI_VHCI_PATH_OFFLINE:
5312 		s = "PATH_OFFLINE";
5313 		break;
5314 
5315 	case SCSI_VHCI_PATH_STANDBY:
5316 		s = "PATH_STANDBY";
5317 		break;
5318 
5319 	case SCSI_VHCI_PATH_TEST:
5320 		s = "PATH_TEST";
5321 		break;
5322 
5323 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5324 		s = "SWITCH_TO_CNTLR";
5325 		break;
5326 	case SCSI_VHCI_PATH_DISABLE:
5327 		s = "PATH_DISABLE";
5328 		break;
5329 	case SCSI_VHCI_PATH_ENABLE:
5330 		s = "PATH_ENABLE";
5331 		break;
5332 
5333 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5334 		s = "GET_TARGET_LONGNAME";
5335 		break;
5336 
5337 #ifdef	DEBUG
5338 	case SCSI_VHCI_CONFIGURE_PHCI:
5339 		s = "CONFIGURE_PHCI";
5340 		break;
5341 
5342 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5343 		s = "UNCONFIGURE_PHCI";
5344 		break;
5345 #endif
5346 
5347 	default:
5348 		s = "Unknown";
5349 		vhci_log(CE_NOTE, vdip,
5350 		    "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd);
5351 		retval = ENOTSUP;
5352 		break;
5353 	}
5354 	if (retval != 0) {
5355 		goto end;
5356 	}
5357 
5358 	VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s));
5359 
5360 	/*
5361 	 * Get IOCTL parameters from userland
5362 	 */
5363 	switch (cmd) {
5364 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5365 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5366 	case SCSI_VHCI_GET_CLIENT_NAME:
5367 	case SCSI_VHCI_PATH_ONLINE:
5368 	case SCSI_VHCI_PATH_OFFLINE:
5369 	case SCSI_VHCI_PATH_STANDBY:
5370 	case SCSI_VHCI_PATH_TEST:
5371 	case SCSI_VHCI_PATH_DISABLE:
5372 	case SCSI_VHCI_PATH_ENABLE:
5373 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5374 #ifdef	DEBUG
5375 	case SCSI_VHCI_CONFIGURE_PHCI:
5376 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5377 #endif
5378 		retval = vhci_get_iocdata((const void *)data, pioc, mode, s);
5379 		break;
5380 
5381 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5382 		retval = vhci_get_iocswitchdata((const void *)data, piocsc,
5383 		    mode, s);
5384 		break;
5385 	}
5386 	if (retval != 0) {
5387 		goto end;
5388 	}
5389 
5390 
5391 	/*
5392 	 * Process the IOCTL
5393 	 */
5394 	switch (cmd) {
5395 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5396 	{
5397 		uint_t		num_paths;	/* Num paths to client dev */
5398 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5399 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5400 		dev_info_t	*cdip;		/* Client device dip */
5401 
5402 		if (pioc->ret_elem == NULL) {
5403 			retval = EINVAL;
5404 			break;
5405 		}
5406 
5407 		/* Get client device path from user land */
5408 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5409 			retval = EFAULT;
5410 			break;
5411 		}
5412 
5413 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5414 		    "client <%s>", s, client_path));
5415 
5416 		/* Get number of paths to this client device */
5417 		if ((cdip = mdi_client_path2devinfo(vdip, client_path))
5418 		    == NULL) {
5419 			retval = ENXIO;
5420 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5421 			    "client dip doesn't exist. invalid path <%s>",
5422 			    s, client_path));
5423 			break;
5424 		}
5425 		num_paths = mdi_client_get_path_count(cdip);
5426 
5427 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5428 		    sizeof (num_paths), mode)) {
5429 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5430 			    "num_paths copyout failed", s));
5431 			retval = EFAULT;
5432 			break;
5433 		}
5434 
		/* If the user just wanted num_paths, then return */
5436 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5437 		    num_paths == 0) {
5438 			break;
5439 		}
5440 
		/* Clamp num_paths to the number that can be sent to userland */
5442 		if (num_paths > pioc->buf_elem) {
5443 			num_paths = pioc->buf_elem;
5444 		}
5445 
5446 		/* Allocate memory and get userland pointers */
5447 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5448 		    pioc, mode, s) != 0) {
5449 			retval = EFAULT;
5450 			break;
5451 		}
5452 		ASSERT(upibuf != NULL);
5453 		ASSERT(kpibuf != NULL);
5454 
5455 		/*
5456 		 * Get the path information and send it to userland.
5457 		 */
5458 		if (vhci_get_client_path_list(cdip, kpibuf, num_paths)
5459 		    != MDI_SUCCESS) {
5460 			retval = ENXIO;
5461 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5462 			break;
5463 		}
5464 
5465 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5466 		    pioc, mode, s)) {
5467 			retval = EFAULT;
5468 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5469 			break;
5470 		}
5471 
5472 		/* Free the memory allocated for path information */
5473 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5474 		break;
5475 	}
5476 
5477 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5478 	{
5479 		uint_t		num_paths;	/* Num paths to client dev */
5480 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5481 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5482 		dev_info_t	*pdip;		/* PHCI device dip */
5483 
5484 		if (pioc->ret_elem == NULL) {
5485 			retval = EINVAL;
5486 			break;
5487 		}
5488 
5489 		/* Get PHCI device path from user land */
5490 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5491 			retval = EFAULT;
5492 			break;
5493 		}
5494 
5495 		VHCI_DEBUG(6, (CE_WARN, vdip,
5496 		    "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));
5497 
		/* Get number of paths associated with this PHCI device */
5499 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5500 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5501 			    "phci dip doesn't exist. invalid path <%s>",
5502 			    s, phci_path));
5503 			retval = ENXIO;
5504 			break;
5505 		}
5506 
5507 		num_paths = mdi_phci_get_path_count(pdip);
5508 
5509 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5510 		    sizeof (num_paths), mode)) {
5511 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5512 			    "num_paths copyout failed", s));
5513 			retval = EFAULT;
5514 			break;
5515 		}
5516 
		/* If the user just wanted num_paths, then return */
5518 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5519 		    num_paths == 0) {
5520 			break;
5521 		}
5522 
		/* Clamp num_paths to the number that can be sent to userland */
5524 		if (num_paths > pioc->buf_elem) {
5525 			num_paths = pioc->buf_elem;
5526 		}
5527 
5528 		/* Allocate memory and get userland pointers */
5529 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5530 		    pioc, mode, s) != 0) {
5531 			retval = EFAULT;
5532 			break;
5533 		}
5534 		ASSERT(upibuf != NULL);
5535 		ASSERT(kpibuf != NULL);
5536 
5537 		/*
5538 		 * Get the path information and send it to userland.
5539 		 */
5540 		if (vhci_get_phci_path_list(pdip, kpibuf, num_paths)
5541 		    != MDI_SUCCESS) {
5542 			retval = ENXIO;
5543 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5544 			break;
5545 		}
5546 
5547 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5548 		    pioc, mode, s)) {
5549 			retval = EFAULT;
5550 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5551 			break;
5552 		}
5553 
5554 		/* Free the memory allocated for path information */
5555 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5556 		break;
5557 	}
5558 
5559 	case SCSI_VHCI_GET_CLIENT_NAME:
5560 	{
5561 		dev_info_t		*cdip, *pdip;
5562 
5563 		/* Get PHCI path and device address from user land */
5564 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5565 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5566 			retval = EFAULT;
5567 			break;
5568 		}
5569 
5570 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5571 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5572 
5573 		/* Get the PHCI dip */
5574 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5575 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5576 			    "phci dip doesn't exist. invalid path <%s>",
5577 			    s, phci_path));
5578 			retval = ENXIO;
5579 			break;
5580 		}
5581 
5582 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5583 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5584 			    "pathinfo doesn't exist. invalid device addr", s));
5585 			retval = ENXIO;
5586 			break;
5587 		}
5588 
5589 		/* Get the client device pathname and send to userland */
5590 		cdip = mdi_pi_get_client(pip);
5591 		vhci_ioc_devi_to_path(cdip, client_path);
5592 
5593 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5594 		    "client <%s>", s, client_path));
5595 
5596 		if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) {
5597 			retval = EFAULT;
5598 			break;
5599 		}
5600 		break;
5601 	}
5602 
5603 	case SCSI_VHCI_PATH_ONLINE:
5604 	case SCSI_VHCI_PATH_OFFLINE:
5605 	case SCSI_VHCI_PATH_STANDBY:
5606 	case SCSI_VHCI_PATH_TEST:
5607 	{
5608 		dev_info_t		*pdip;	/* PHCI dip */
5609 
5610 		/* Get PHCI path and device address from user land */
5611 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5612 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5613 			retval = EFAULT;
5614 			break;
5615 		}
5616 
5617 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5618 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5619 
5620 		/* Get the PHCI dip */
5621 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5622 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5623 			    "phci dip doesn't exist. invalid path <%s>",
5624 			    s, phci_path));
5625 			retval = ENXIO;
5626 			break;
5627 		}
5628 
5629 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5630 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5631 			    "pathinfo doesn't exist. invalid device addr", s));
5632 			retval = ENXIO;
5633 			break;
5634 		}
5635 
5636 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5637 		    "Calling MDI function to change device state", s));
5638 
5639 		switch (cmd) {
5640 		case SCSI_VHCI_PATH_ONLINE:
5641 			retval = mdi_pi_online(pip, 0);
5642 			break;
5643 
5644 		case SCSI_VHCI_PATH_OFFLINE:
5645 			retval = mdi_pi_offline(pip, 0);
5646 			break;
5647 
5648 		case SCSI_VHCI_PATH_STANDBY:
5649 			retval = mdi_pi_standby(pip, 0);
5650 			break;
5651 
5652 		case SCSI_VHCI_PATH_TEST:
5653 			break;
5654 		}
5655 		break;
5656 	}
5657 
5658 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5659 	{
5660 		dev_info_t *cdip;
5661 		struct scsi_device *devp;
5662 
5663 		/* Get the client device pathname */
5664 		if (ddi_copyin(piocsc->client, client_path,
5665 		    MAXPATHLEN, mode)) {
5666 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5667 			    "client_path copyin failed", s));
5668 			retval = EFAULT;
5669 			break;
5670 		}
5671 
		/* Get the path class to which the user wants to switch */
5673 		if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) {
5674 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5675 			    "controller_class copyin failed", s));
5676 			retval = EFAULT;
5677 			break;
5678 		}
5679 
5680 		/* Perform validity checks */
5681 		if ((cdip = mdi_client_path2devinfo(vdip,
5682 		    client_path)) == NULL) {
5683 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5684 			    "client dip doesn't exist. invalid path <%s>",
5685 			    s, client_path));
5686 			retval = ENXIO;
5687 			break;
5688 		}
5689 
5690 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func "
5691 		    "to switch controller"));
5692 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> "
5693 		    "class <%s>", client_path, paddr));
5694 
5695 		if (strcmp(paddr, PCLASS_PRIMARY) &&
5696 		    strcmp(paddr, PCLASS_SECONDARY)) {
5697 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5698 			    "invalid path class <%s>", s, paddr));
5699 			retval = ENXIO;
5700 			break;
5701 		}
5702 
5703 		devp = ddi_get_driver_private(cdip);
5704 		if (devp == NULL) {
5705 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5706 			    "invalid scsi device <%s>", s, client_path));
5707 			retval = ENXIO;
5708 			break;
5709 		}
5710 		vlun = ADDR2VLUN(&devp->sd_address);
5711 		ASSERT(vlun);
5712 
5713 		/*
		 * Check whether the device has only one pclass, PRIMARY.
		 * If so, this device doesn't support failovers.  It is
		 * assumed that a device with a single pclass has PRIMARY,
		 * as that is the case today.  If this is not true and other
		 * symmetric devices with a different pclass are supported
		 * in the future, this IOCTL will have to be overhauled
		 * anyway, as the only arguments it currently accepts are
		 * PRIMARY and SECONDARY.
5721 		 */
5722 		fo = vlun->svl_fops;
5723 		if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass,
5724 		    vlun->svl_fops_ctpriv)) {
5725 			retval = ENOTSUP;
5726 			break;
5727 		}
5728 
5729 		VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
5730 		mutex_enter(&vlun->svl_mutex);
5731 		if (vlun->svl_active_pclass != NULL) {
5732 			if (strcmp(vlun->svl_active_pclass, paddr) == 0) {
5733 				mutex_exit(&vlun->svl_mutex);
5734 				retval = EALREADY;
5735 				VHCI_RELEASE_LUN(vlun);
5736 				break;
5737 			}
5738 		}
5739 		mutex_exit(&vlun->svl_mutex);
		/* Call the mdi function to cause a switchover */
5741 		retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
5742 		if (retval == MDI_SUCCESS) {
5743 			retval = 0;
5744 		} else if (retval == MDI_BUSY) {
5745 			retval = EBUSY;
5746 		} else {
5747 			retval = EIO;
5748 		}
5749 		VHCI_RELEASE_LUN(vlun);
5750 		break;
5751 	}
5752 
5753 	case SCSI_VHCI_PATH_ENABLE:
5754 	case SCSI_VHCI_PATH_DISABLE:
5755 	{
5756 		dev_info_t	*cdip, *pdip;
5757 
5758 		/*
5759 		 * Get client device path from user land
5760 		 */
5761 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5762 			retval = EFAULT;
5763 			break;
5764 		}
5765 
5766 		/*
5767 		 * Get Phci device path from user land
5768 		 */
5769 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5770 			retval = EFAULT;
5771 			break;
5772 		}
5773 
5774 		/*
5775 		 * Get the devinfo for the Phci.
5776 		 */
5777 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5778 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5779 			    "phci dip doesn't exist. invalid path <%s>",
5780 			    s, phci_path));
5781 			retval = ENXIO;
5782 			break;
5783 		}
5784 
5785 		/*
		 * If the client path is set to /scsi_vhci, the operation
		 * applies to all clients, so set cdip to NULL.  Otherwise,
		 * try to get the client dip.
5789 		 */
5790 		if (strcmp(client_path, "/scsi_vhci") == 0) {
5791 			cdip = NULL;
5792 		} else {
5793 			if ((cdip = mdi_client_path2devinfo(vdip,
5794 			    client_path)) == NULL) {
5795 				retval = ENXIO;
5796 				VHCI_DEBUG(1, (CE_WARN, NULL,
5797 				    "!vhci_ioctl: ioctl <%s> client dip "
5798 				    "doesn't exist. invalid path <%s>",
5799 				    s, client_path));
5800 				break;
5801 			}
5802 		}
5803 
5804 		if (cmd == SCSI_VHCI_PATH_ENABLE)
5805 			retval = mdi_pi_enable(cdip, pdip, USER_DISABLE);
5806 		else
5807 			retval = mdi_pi_disable(cdip, pdip, USER_DISABLE);
5808 
5809 		break;
5810 	}
5811 
5812 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5813 	{
5814 		uint_t		pid = pioc->buf_elem;
5815 		char		*target_port;
5816 		mod_hash_val_t	hv;
5817 
5818 		/* targetmap lookup of 'target-port' by <pid> */
5819 		if (mod_hash_find(vhci_targetmap_bypid,
5820 		    (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) {
5821 			/*
5822 			 * NOTE: failure to find the mapping is OK for guid
5823 			 * based 'target-port' values.
5824 			 */
5825 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5826 			    "targetport mapping doesn't exist: pid %d",
5827 			    s, pid));
5828 			retval = ENXIO;
5829 			break;
5830 		}
5831 
5832 		/* copyout 'target-port' result */
5833 		target_port = (char *)hv;
5834 		if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) {
5835 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5836 			    "targetport copyout failed: len: %d",
5837 			    s, (int)strlen(target_port)));
5838 			retval = EFAULT;
5839 		}
5840 		break;
5841 	}
5842 
5843 #ifdef	DEBUG
5844 	case SCSI_VHCI_CONFIGURE_PHCI:
5845 	{
5846 		dev_info_t		*pdip;
5847 
5848 		/* Get PHCI path and device address from user land */
5849 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5850 			retval = EFAULT;
5851 			break;
5852 		}
5853 
5854 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5855 		    "phci <%s>", s, phci_path));
5856 
5857 		/* Get the PHCI dip */
5858 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
5859 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5860 			    "phci dip doesn't exist. invalid path <%s>",
5861 			    s, phci_path));
5862 			retval = ENXIO;
5863 			break;
5864 		}
5865 
5866 		if (ndi_devi_config(pdip,
5867 		    NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) {
5868 			retval = EIO;
5869 		}
5870 
5871 		ddi_release_devi(pdip);
5872 		break;
5873 	}
5874 
5875 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5876 	{
5877 		dev_info_t		*pdip;
5878 
5879 		/* Get PHCI path and device address from user land */
5880 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5881 			retval = EFAULT;
5882 			break;
5883 		}
5884 
5885 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5886 		    "phci <%s>", s, phci_path));
5887 
5888 		/* Get the PHCI dip */
5889 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
5890 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5891 			    "phci dip doesn't exist. invalid path <%s>",
5892 			    s, phci_path));
5893 			retval = ENXIO;
5894 			break;
5895 		}
5896 
5897 		if (ndi_devi_unconfig(pdip,
5898 		    NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
5899 			retval = EBUSY;
5900 		}
5901 
5902 		ddi_release_devi(pdip);
5903 		break;
5904 	}
5905 #endif
5906 	}
5907 
5908 end:
5909 	/* Free the memory allocated above */
5910 	if (phci_path != NULL) {
5911 		kmem_free(phci_path, MAXPATHLEN);
5912 	}
5913 	if (client_path != NULL) {
5914 		kmem_free(client_path, MAXPATHLEN);
5915 	}
5916 	if (paddr != NULL) {
5917 		kmem_free(paddr, MAXNAMELEN);
5918 	}
5919 	return (retval);
5920 }
5921 
5922 /*
5923  * devctl IOCTL support for client device DR
5924  */
5925 /* ARGSUSED */
5926 int
5927 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
5928     int *rvalp)
5929 {
5930 	dev_info_t *self;
5931 	dev_info_t *child;
5932 	scsi_hba_tran_t *hba;
5933 	struct devctl_iocdata *dcp;
5934 	struct scsi_vhci *vhci;
5935 	int rv = 0;
5936 	int retval = 0;
5937 	scsi_vhci_priv_t *svp;
5938 	mdi_pathinfo_t  *pip;
5939 
5940 	if ((vhci = ddi_get_soft_state(vhci_softstate,
5941 	    MINOR2INST(getminor(dev)))) == NULL)
5942 		return (ENXIO);
5943 
5944 	/*
5945 	 * check if :devctl minor device has been opened
5946 	 */
5947 	mutex_enter(&vhci->vhci_mutex);
5948 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5949 		mutex_exit(&vhci->vhci_mutex);
5950 		return (ENXIO);
5951 	}
5952 	mutex_exit(&vhci->vhci_mutex);
5953 
5954 	self = vhci->vhci_dip;
5955 	hba = ddi_get_driver_private(self);
5956 	if (hba == NULL)
5957 		return (ENXIO);
5958 
5959 	/*
5960 	 * We can use the generic implementation for these ioctls
5961 	 */
5962 	switch (cmd) {
5963 	case DEVCTL_DEVICE_GETSTATE:
5964 	case DEVCTL_DEVICE_ONLINE:
5965 	case DEVCTL_DEVICE_OFFLINE:
5966 	case DEVCTL_DEVICE_REMOVE:
5967 	case DEVCTL_BUS_GETSTATE:
5968 		return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
5969 	}
5970 
5971 	/*
5972 	 * read devctl ioctl data
5973 	 */
5974 	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
5975 		return (EFAULT);
5976 
5977 	switch (cmd) {
5978 
5979 	case DEVCTL_DEVICE_RESET:
5980 		/*
5981 		 * lookup and hold child device
5982 		 */
5983 		if ((child = ndi_devi_find(self, ndi_dc_getname(dcp),
5984 		    ndi_dc_getaddr(dcp))) == NULL) {
5985 			rv = ENXIO;
5986 			break;
5987 		}
5988 		retval = mdi_select_path(child, NULL,
5989 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
5990 		    NULL, &pip);
5991 		if ((retval != MDI_SUCCESS) || (pip == NULL)) {
5992 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl:"
5993 			    "Unable to get a path, dip 0x%p", (void *)child));
5994 			rv = ENXIO;
5995 			break;
5996 		}
5997 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5998 		if (vhci_recovery_reset(svp->svp_svl,
5999 		    &svp->svp_psd->sd_address, TRUE,
6000 		    VHCI_DEPTH_TARGET) == 0) {
6001 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6002 			    "!vhci_ioctl(pip:%p): "
6003 			    "reset failed\n", (void *)pip));
6004 			rv = ENXIO;
6005 		}
6006 		mdi_rele_path(pip);
6007 		break;
6008 
6009 	case DEVCTL_BUS_QUIESCE:
6010 	case DEVCTL_BUS_UNQUIESCE:
6011 	case DEVCTL_BUS_RESET:
6012 	case DEVCTL_BUS_RESETALL:
6013 #ifdef	DEBUG
6014 	case DEVCTL_BUS_CONFIGURE:
6015 	case DEVCTL_BUS_UNCONFIGURE:
6016 #endif
6017 		rv = ENOTSUP;
6018 		break;
6019 
6020 	default:
6021 		rv = ENOTTY;
6022 	} /* end of outer switch */
6023 
6024 	ndi_dc_freehdl(dcp);
6025 	return (rv);
6026 }
6027 
6028 /*
6029  * Routine to get the PHCI pathname from ioctl structures in userland
6030  */
6031 /* ARGSUSED */
6032 static int
6033 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path,
6034 	int mode, caddr_t s)
6035 {
6036 	int retval = 0;
6037 
6038 	if (ddi_copyin(pioc->phci, phci_path, MAXPATHLEN, mode)) {
6039 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> "
6040 		    "phci_path copyin failed", s));
6041 		retval = EFAULT;
6042 	}
6043 	return (retval);
6044 
6045 }
6046 
6047 
6048 /*
6049  * Routine to get the Client device pathname from ioctl structures in userland
6050  */
6051 /* ARGSUSED */
6052 static int
6053 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path,
6054 	int mode, caddr_t s)
6055 {
6056 	int retval = 0;
6057 
6058 	if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) {
6059 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: "
6060 		    "ioctl <%s> client_path copyin failed", s));
6061 		retval = EFAULT;
6062 	}
6063 	return (retval);
6064 }
6065 
6066 
6067 /*
6068  * Routine to get physical device address from ioctl structure in userland
6069  */
6070 /* ARGSUSED */
6071 static int
6072 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s)
6073 {
6074 	int retval = 0;
6075 
6076 	if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) {
6077 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: "
6078 		    "ioctl <%s> device addr copyin failed", s));
6079 		retval = EFAULT;
6080 	}
6081 	return (retval);
6082 }
6083 
6084 
6085 /*
6086  * Routine to send client device pathname to userland.
6087  */
6088 /* ARGSUSED */
6089 static int
6090 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc,
6091 	int mode, caddr_t s)
6092 {
6093 	int retval = 0;
6094 
6095 	if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) {
6096 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: "
6097 		    "ioctl <%s> client_path copyout failed", s));
6098 		retval = EFAULT;
6099 	}
6100 	return (retval);
6101 }
6102 
6103 
6104 /*
 * Routine to translate a dev_info pointer (dip) to a device pathname.
6106  */
6107 static void
6108 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path)
6109 {
6110 	(void) ddi_pathname(dip, path);
6111 }
6112 
6113 
6114 /*
6115  * vhci_get_phci_path_list:
 *		get information about the client paths associated with a
 *		given PHCI device.
 *
 * Return Values:
 *		MDI_SUCCESS; path information is returned through pibuf.
6121  */
6122 int
6123 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf,
6124 	uint_t num_elems)
6125 {
6126 	uint_t			count, done;
6127 	mdi_pathinfo_t		*pip;
6128 	sv_path_info_t		*ret_pip;
6129 	int			status;
6130 	size_t			prop_size;
6131 	int			circular;
6132 
6133 	/*
	 * Walk the client paths associated with this PHCI and collect
	 * the path information for each one.
6136 	 */
6137 
6138 	ret_pip = pibuf;
6139 	count = 0;
6140 
6141 	ndi_devi_enter(pdip, &circular);
6142 
6143 	done = (count >= num_elems);
6144 	pip = mdi_get_next_client_path(pdip, NULL);
6145 	while (pip && !done) {
6146 		mdi_pi_lock(pip);
6147 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6148 		    ret_pip->device.ret_phci);
6149 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6150 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6151 		    &ret_pip->ret_ext_state);
6152 
6153 		status = mdi_prop_size(pip, &prop_size);
6154 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6155 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6156 		}
6157 
6158 #ifdef DEBUG
6159 		if (status != MDI_SUCCESS) {
6160 			VHCI_DEBUG(2, (CE_WARN, NULL,
6161 			    "!vhci_get_phci_path_list: "
6162 			    "phci <%s>, prop size failure 0x%x",
6163 			    ret_pip->device.ret_phci, status));
6164 		}
6165 #endif /* DEBUG */
6166 
6167 
6168 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6169 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6170 			status = mdi_prop_pack(pip,
6171 			    &ret_pip->ret_prop.buf,
6172 			    ret_pip->ret_prop.buf_size);
6173 
6174 #ifdef DEBUG
6175 			if (status != MDI_SUCCESS) {
6176 				VHCI_DEBUG(2, (CE_WARN, NULL,
6177 				    "!vhci_get_phci_path_list: "
6178 				    "phci <%s>, prop pack failure 0x%x",
6179 				    ret_pip->device.ret_phci, status));
6180 			}
6181 #endif /* DEBUG */
6182 		}
6183 
6184 		mdi_pi_unlock(pip);
6185 		pip = mdi_get_next_client_path(pdip, pip);
6186 		ret_pip++;
6187 		count++;
6188 		done = (count >= num_elems);
6189 	}
6190 
6191 	ndi_devi_exit(pdip, circular);
6192 
6193 	return (MDI_SUCCESS);
6194 }
6195 
6196 
6197 /*
6198  * vhci_get_client_path_list:
6199  *		get information about various paths associated with a
6200  *		given client device.
6201  *
6202  * Return Values:
 *		MDI_SUCCESS; path information is returned through pibuf.
6204  */
6205 int
6206 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf,
6207 	uint_t num_elems)
6208 {
6209 	uint_t			count, done;
6210 	mdi_pathinfo_t		*pip;
6211 	sv_path_info_t		*ret_pip;
6212 	int			status;
6213 	size_t			prop_size;
6214 	int			circular;
6215 
6216 	ret_pip = pibuf;
6217 	count = 0;
6218 
6219 	ndi_devi_enter(cdip, &circular);
6220 
6221 	done = (count >= num_elems);
6222 	pip = mdi_get_next_phci_path(cdip, NULL);
6223 	while (pip && !done) {
6224 		mdi_pi_lock(pip);
6225 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6226 		    ret_pip->device.ret_phci);
6227 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6228 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6229 		    &ret_pip->ret_ext_state);
6230 
6231 		status = mdi_prop_size(pip, &prop_size);
6232 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6233 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6234 		}
6235 
6236 #ifdef DEBUG
6237 		if (status != MDI_SUCCESS) {
6238 			VHCI_DEBUG(2, (CE_WARN, NULL,
6239 			    "!vhci_get_client_path_list: "
6240 			    "phci <%s>, prop size failure 0x%x",
6241 			    ret_pip->device.ret_phci, status));
6242 		}
6243 #endif /* DEBUG */
6244 
6245 
6246 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6247 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6248 			status = mdi_prop_pack(pip,
6249 			    &ret_pip->ret_prop.buf,
6250 			    ret_pip->ret_prop.buf_size);
6251 
6252 #ifdef DEBUG
6253 			if (status != MDI_SUCCESS) {
6254 				VHCI_DEBUG(2, (CE_WARN, NULL,
6255 				    "!vhci_get_client_path_list: "
6256 				    "phci <%s>, prop pack failure 0x%x",
6257 				    ret_pip->device.ret_phci, status));
6258 			}
6259 #endif /* DEBUG */
6260 		}
6261 
6262 		mdi_pi_unlock(pip);
6263 		pip = mdi_get_next_phci_path(cdip, pip);
6264 		ret_pip++;
6265 		count++;
6266 		done = (count >= num_elems);
6267 	}
6268 
6269 	ndi_devi_exit(cdip, circular);
6270 
6271 	return (MDI_SUCCESS);
6272 }
6273 
6274 
6275 /*
6276  * Routine to get ioctl argument structure from userland.
6277  */
6278 /* ARGSUSED */
6279 static int
6280 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s)
6281 {
6282 	int	retval = 0;
6283 
6284 #ifdef  _MULTI_DATAMODEL
6285 	switch (ddi_model_convert_from(mode & FMODELS)) {
6286 	case DDI_MODEL_ILP32:
6287 	{
6288 		sv_iocdata32_t	ioc32;
6289 
6290 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6291 			retval = EFAULT;
6292 			break;
6293 		}
6294 		pioc->client	= (caddr_t)(uintptr_t)ioc32.client;
6295 		pioc->phci	= (caddr_t)(uintptr_t)ioc32.phci;
6296 		pioc->addr	= (caddr_t)(uintptr_t)ioc32.addr;
6297 		pioc->buf_elem	= (uint_t)ioc32.buf_elem;
6298 		pioc->ret_buf	= (sv_path_info_t *)(uintptr_t)ioc32.ret_buf;
6299 		pioc->ret_elem	= (uint_t *)(uintptr_t)ioc32.ret_elem;
6300 		break;
6301 	}
6302 
6303 	case DDI_MODEL_NONE:
6304 		if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6305 			retval = EFAULT;
6306 			break;
6307 		}
6308 		break;
6309 	}
6310 #else   /* _MULTI_DATAMODEL */
6311 	if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6312 		retval = EFAULT;
6313 	}
6314 #endif  /* _MULTI_DATAMODEL */
6315 
6316 #ifdef DEBUG
6317 	if (retval) {
6318 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6319 		    "iocdata copyin failed", s));
6320 	}
6321 #endif
6322 
6323 	return (retval);
6324 }
6325 
6326 
6327 /*
6328  * Routine to get the ioctl argument for ioctl causing controller switchover.
6329  */
6330 /* ARGSUSED */
6331 static int
6332 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc,
6333     int mode, caddr_t s)
6334 {
6335 	int	retval = 0;
6336 
6337 #ifdef  _MULTI_DATAMODEL
6338 	switch (ddi_model_convert_from(mode & FMODELS)) {
6339 	case DDI_MODEL_ILP32:
6340 	{
6341 		sv_switch_to_cntlr_iocdata32_t	ioc32;
6342 
6343 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6344 			retval = EFAULT;
6345 			break;
6346 		}
6347 		piocsc->client	= (caddr_t)(uintptr_t)ioc32.client;
6348 		piocsc->class	= (caddr_t)(uintptr_t)ioc32.class;
6349 		break;
6350 	}
6351 
6352 	case DDI_MODEL_NONE:
6353 		if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6354 			retval = EFAULT;
6355 		}
6356 		break;
6357 	}
6358 #else   /* _MULTI_DATAMODEL */
6359 	if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6360 		retval = EFAULT;
6361 	}
6362 #endif  /* _MULTI_DATAMODEL */
6363 
6364 #ifdef DEBUG
6365 	if (retval) {
6366 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6367 		    "switch_to_cntlr_iocdata copyin failed", s));
6368 	}
6369 #endif
6370 
6371 	return (retval);
6372 }
6373 
6374 
6375 /*
6376  * Routine to allocate memory for the path information structures.
 * It allocates two chunks of memory: one for keeping userland
 * pointers/values for path information and path properties, and a
 * second holding the kernel memory allocated for path properties.
 * These path properties are finally copied to userland.
6381  */
6382 /* ARGSUSED */
6383 static int
6384 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf,
6385     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6386 {
6387 	sv_path_info_t	*pi;
6388 	uint_t		bufsize;
6389 	int		retval = 0;
6390 	int		index;
6391 
6392 	/* Allocate memory */
6393 	*upibuf = (sv_path_info_t *)
6394 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6395 	ASSERT(*upibuf != NULL);
6396 	*kpibuf = (sv_path_info_t *)
6397 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6398 	ASSERT(*kpibuf != NULL);
6399 
6400 	/*
6401 	 * Get the path info structure from the user space.
6402 	 * We are interested in the following fields:
6403 	 *	- user size of buffer for per path properties.
6404 	 *	- user address of buffer for path info properties.
6405 	 *	- user pointer for returning actual buffer size
6406 	 * Keep these fields in the 'upibuf' structures.
6407 	 * Allocate buffer for per path info properties in kernel
6408 	 * structure ('kpibuf').
6409 	 * Size of these buffers will be equal to the size of buffers
6410 	 * in the user space.
6411 	 */
6412 #ifdef  _MULTI_DATAMODEL
6413 	switch (ddi_model_convert_from(mode & FMODELS)) {
6414 	case DDI_MODEL_ILP32:
6415 	{
6416 		sv_path_info32_t	*src;
6417 		sv_path_info32_t	pi32;
6418 
6419 		src  = (sv_path_info32_t *)pioc->ret_buf;
6420 		pi = (sv_path_info_t *)*upibuf;
6421 		for (index = 0; index < num_paths; index++, src++, pi++) {
6422 			if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) {
6423 				retval = EFAULT;
6424 				break;
6425 			}
6426 
6427 			pi->ret_prop.buf_size	=
6428 			    (uint_t)pi32.ret_prop.buf_size;
6429 			pi->ret_prop.ret_buf_size =
6430 			    (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size;
6431 			pi->ret_prop.buf	=
6432 			    (caddr_t)(uintptr_t)pi32.ret_prop.buf;
6433 		}
6434 		break;
6435 	}
6436 
6437 	case DDI_MODEL_NONE:
6438 		if (ddi_copyin(pioc->ret_buf, *upibuf,
6439 		    sizeof (sv_path_info_t) * num_paths, mode)) {
6440 			retval = EFAULT;
6441 		}
6442 		break;
6443 	}
6444 #else   /* _MULTI_DATAMODEL */
6445 	if (ddi_copyin(pioc->ret_buf, *upibuf,
6446 	    sizeof (sv_path_info_t) * num_paths, mode)) {
6447 		retval = EFAULT;
6448 	}
6449 #endif  /* _MULTI_DATAMODEL */
6450 
6451 	if (retval != 0) {
6452 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: "
6453 		    "ioctl <%s> normal: path_info copyin failed", s));
6454 		kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths);
6455 		kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths);
6456 		*upibuf = NULL;
6457 		*kpibuf = NULL;
6458 		return (retval);
6459 	}
6460 
6461 	/*
6462 	 * Allocate memory for per path properties.
6463 	 */
6464 	for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) {
6465 		bufsize = (*upibuf)[index].ret_prop.buf_size;
6466 
6467 		if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) {
6468 			pi->ret_prop.buf_size = bufsize;
6469 			pi->ret_prop.buf = (caddr_t)
6470 			    kmem_zalloc(bufsize, KM_SLEEP);
6471 			ASSERT(pi->ret_prop.buf != NULL);
6472 		} else {
6473 			pi->ret_prop.buf_size = 0;
6474 			pi->ret_prop.buf = NULL;
6475 		}
6476 
6477 		if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) {
6478 			pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc(
6479 			    sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP);
6480 			ASSERT(pi->ret_prop.ret_buf_size != NULL);
6481 		} else {
6482 			pi->ret_prop.ret_buf_size = NULL;
6483 		}
6484 	}
6485 
6486 	return (0);
6487 }
6488 
6489 
6490 /*
6491  * Routine to free memory for the path information structures.
6492  * This is the memory which was allocated earlier.
6493  */
6494 /* ARGSUSED */
6495 static void
6496 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6497     uint_t num_paths)
6498 {
6499 	sv_path_info_t	*pi;
6500 	int		index;
6501 
6502 	/* Free memory for per path properties */
6503 	for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) {
6504 		if (pi->ret_prop.ret_buf_size != NULL) {
6505 			kmem_free(pi->ret_prop.ret_buf_size,
6506 			    sizeof (*pi->ret_prop.ret_buf_size));
6507 		}
6508 
6509 		if (pi->ret_prop.buf != NULL) {
6510 			kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size);
6511 		}
6512 	}
6513 
6514 	/* Free memory for path info structures */
6515 	kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths);
6516 	kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths);
6517 }
6518 
6519 
6520 /*
6521  * Routine to copy path information and path properties to userland.
6522  */
6523 /* ARGSUSED */
6524 static int
6525 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6526     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6527 {
6528 	int			retval = 0, index;
6529 	sv_path_info_t		*upi_ptr;
6530 	sv_path_info32_t	*upi32_ptr;
6531 
6532 #ifdef  _MULTI_DATAMODEL
6533 	switch (ddi_model_convert_from(mode & FMODELS)) {
6534 	case DDI_MODEL_ILP32:
6535 		goto copy_32bit;
6536 
6537 	case DDI_MODEL_NONE:
6538 		goto copy_normal;
6539 	}
6540 #else   /* _MULTI_DATAMODEL */
6541 
6542 	goto copy_normal;
6543 
6544 #endif  /* _MULTI_DATAMODEL */
6545 
6546 copy_normal:
6547 
6548 	/*
6549 	 * Copy path information and path properties to user land.
6550 	 * Pointer fields inside the path property structure were
6551 	 * saved in the 'upibuf' structure earlier.
6552 	 */
6553 	upi_ptr = pioc->ret_buf;
6554 	for (index = 0; index < num_paths; index++) {
6555 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6556 		    upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6557 			retval = EFAULT;
6558 			break;
6559 		}
6560 
6561 		if (ddi_copyout(kpibuf[index].ret_addr,
6562 		    upi_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6563 			retval = EFAULT;
6564 			break;
6565 		}
6566 
6567 		if (ddi_copyout(&kpibuf[index].ret_state,
6568 		    &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state),
6569 		    mode)) {
6570 			retval = EFAULT;
6571 			break;
6572 		}
6573 
6574 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6575 		    &upi_ptr[index].ret_ext_state,
6576 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6577 			retval = EFAULT;
6578 			break;
6579 		}
6580 
6581 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6582 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6583 		    upibuf[index].ret_prop.ret_buf_size,
6584 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6585 			retval = EFAULT;
6586 			break;
6587 		}
6588 
6589 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6590 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6591 		    upibuf[index].ret_prop.buf,
6592 		    upibuf[index].ret_prop.buf_size, mode)) {
6593 			retval = EFAULT;
6594 			break;
6595 		}
6596 	}
6597 
6598 #ifdef DEBUG
6599 	if (retval) {
6600 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6601 		    "normal: path_info copyout failed", s));
6602 	}
6603 #endif
6604 
6605 	return (retval);
6606 
6607 copy_32bit:
6608 	/*
6609 	 * Copy path information and path properties to user land.
6610 	 * Pointer fields inside the path property structure were
6611 	 * saved in the 'upibuf' structure earlier.
6612 	 */
6613 	upi32_ptr = (sv_path_info32_t *)pioc->ret_buf;
6614 	for (index = 0; index < num_paths; index++) {
6615 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6616 		    upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6617 			retval = EFAULT;
6618 			break;
6619 		}
6620 
6621 		if (ddi_copyout(kpibuf[index].ret_addr,
6622 		    upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6623 			retval = EFAULT;
6624 			break;
6625 		}
6626 
6627 		if (ddi_copyout(&kpibuf[index].ret_state,
6628 		    &upi32_ptr[index].ret_state,
6629 		    sizeof (kpibuf[index].ret_state), mode)) {
6630 			retval = EFAULT;
6631 			break;
6632 		}
6633 
6634 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6635 		    &upi32_ptr[index].ret_ext_state,
6636 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6637 			retval = EFAULT;
6638 			break;
6639 		}
6640 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6641 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6642 		    upibuf[index].ret_prop.ret_buf_size,
6643 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6644 			retval = EFAULT;
6645 			break;
6646 		}
6647 
6648 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6649 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6650 		    upibuf[index].ret_prop.buf,
6651 		    upibuf[index].ret_prop.buf_size, mode)) {
6652 			retval = EFAULT;
6653 			break;
6654 		}
6655 	}
6656 
6657 #ifdef DEBUG
6658 	if (retval) {
6659 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6660 		    "normal: path_info copyout failed", s));
6661 	}
6662 #endif
6663 
6664 	return (retval);
6665 }
6666 
6667 
6668 /*
6669  * vhci_failover()
6670  * This routine expects VHCI_HOLD_LUN before being invoked.  It can be invoked
 * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC.  For asynchronous failovers
 * this routine does VHCI_RELEASE_LUN on exit.  For synchronous failovers
 * it is the caller's responsibility to release the lun.
6674  */
6675 
6676 /* ARGSUSED */
6677 static int
6678 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags)
6679 {
6680 	char			*guid;
6681 	scsi_vhci_lun_t		*vlun = NULL;
6682 	struct scsi_vhci	*vhci;
6683 	mdi_pathinfo_t		*pip, *npip;
6684 	char			*s_pclass, *pclass1, *pclass2, *pclass;
6685 	char			active_pclass_copy[255], *active_pclass_ptr;
6686 	char			*ptr1, *ptr2;
6687 	mdi_pathinfo_state_t	pi_state;
6688 	uint32_t		pi_ext_state;
6689 	scsi_vhci_priv_t	*svp;
6690 	struct scsi_device	*sd;
6691 	struct scsi_failover_ops	*sfo;
6692 	int			sps; /* mdi_select_path() status */
6693 	int			activation_done = 0;
6694 	int			rval, retval = MDI_FAILURE;
6695 	int			reserve_pending, check_condition, UA_condition;
6696 	struct scsi_pkt		*pkt;
6697 	struct buf		*bp;
6698 
6699 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
6700 	sd = ddi_get_driver_private(cdip);
6701 	vlun = ADDR2VLUN(&sd->sd_address);
6702 	ASSERT(vlun != 0);
6703 	ASSERT(VHCI_LUN_IS_HELD(vlun));
6704 	guid = vlun->svl_lun_wwn;
6705 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid));
6706 	vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s "
6707 	    "(GUID %s)", ddi_node_name(cdip), guid);
6708 
6709 	/*
	 * Let's maintain a local copy of vlun->svl_active_pclass for the
	 * rest of the processing.  Accessing the field directly in the
	 * loop below breaks the loop logic, especially when the field is
	 * updated by other threads (updating path status, etc.), causing
	 * the 'paths are not currently available' condition to be
	 * declared prematurely.
6716 	 */
6717 	mutex_enter(&vlun->svl_mutex);
6718 	if (vlun->svl_active_pclass != NULL) {
6719 		(void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
6720 		    sizeof (active_pclass_copy));
6721 		active_pclass_ptr = &active_pclass_copy[0];
6722 		mutex_exit(&vlun->svl_mutex);
6723 		if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
6724 		    active_pclass_ptr) != 0) {
6725 			retval = MDI_FAILURE;
6726 		}
6727 	} else {
6728 		/*
		 * This can happen only when the available path discovered
		 * to the device is a STANDBY path.
6731 		 */
6732 		mutex_exit(&vlun->svl_mutex);
6733 		active_pclass_copy[0] = '\0';
6734 		active_pclass_ptr = NULL;
6735 	}
6736 
6737 	sfo = vlun->svl_fops;
6738 	ASSERT(sfo != NULL);
6739 	pclass1 = s_pclass = active_pclass_ptr;
6740 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
6741 	    (s_pclass == NULL ? "<none>" : s_pclass)));
6742 
6743 next_pathclass:
6744 
6745 	rval = sfo->sfo_pathclass_next(pclass1, &pclass2,
6746 	    vlun->svl_fops_ctpriv);
6747 	if (rval == ENOENT) {
6748 		if (s_pclass == NULL) {
6749 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
6750 			    "failed, no more pathclasses\n", guid));
6751 			goto done;
6752 		} else {
6753 			(void) sfo->sfo_pathclass_next(NULL, &pclass2,
6754 			    vlun->svl_fops_ctpriv);
6755 		}
6756 	} else if (rval == EINVAL) {
6757 		vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
6758 		    "device %s (GUID %s): Invalid path-class %s",
6759 		    ddi_node_name(cdip), guid,
6760 		    ((pclass1 == NULL) ? "<none>" : pclass1));
6761 		goto done;
6762 	}
6763 	if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
6764 		/*
6765 		 * paths are not currently available
6766 		 */
6767 		vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
6768 		    " for device %s (GUID %s)",
6769 		    ddi_node_name(cdip), guid);
6770 		goto done;
6771 	}
6772 	pip = npip = NULL;
6773 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
6774 	    "%s as failover destination\n", guid, pclass2));
6775 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
6776 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6777 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
6778 		    "STANDBY paths found (status:%x)!\n", guid, sps));
6779 		pclass1 = pclass2;
6780 		goto next_pathclass;
6781 	}
6782 	do {
6783 		pclass = NULL;
6784 		if ((mdi_prop_lookup_string(npip, "path-class",
6785 		    &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
6786 		    pclass) != 0)) {
6787 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6788 			    "!vhci_failover(5.5)(%s): skipping path "
6789 			    "%p(%s)...\n", guid, (void *)npip, pclass));
6790 			pip = npip;
6791 			sps = mdi_select_path(cdip, NULL,
6792 			    MDI_SELECT_STANDBY_PATH, pip, &npip);
6793 			mdi_rele_path(pip);
6794 			(void) mdi_prop_free(pclass);
6795 			continue;
6796 		}
6797 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
6798 
6799 		/*
		 * Issue a READ at a non-zero block on this STANDBY path.
		 * Purple returns
		 * 1. RESERVATION_CONFLICT if a reservation is pending
		 * 2. POR check condition if a reset happened
		 * 3. failover check conditions if one is already in progress
6805 		 */
6806 		reserve_pending = 0;
6807 		check_condition = 0;
6808 		UA_condition = 0;
6809 
6810 		bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
6811 		    (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL);
6812 		if (!bp) {
6813 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6814 			    "vhci_failover !No resources (buf)\n"));
6815 			mdi_rele_path(npip);
6816 			goto done;
6817 		}
6818 		pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
6819 		    CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
6820 		    PKT_CONSISTENT, NULL, NULL);
6821 		if (pkt) {
6822 			(void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t)
6823 			    pkt->pkt_cdbp, SCMD_READ, 1, 1, 0);
6824 			pkt->pkt_flags = FLAG_NOINTR;
6825 check_path_again:
6826 			pkt->pkt_path_instance = mdi_pi_get_path_instance(npip);
6827 			pkt->pkt_time = 3*30;
6828 
6829 			if (scsi_transport(pkt) == TRAN_ACCEPT) {
6830 				switch (pkt->pkt_reason) {
6831 				case CMD_CMPLT:
6832 					switch (SCBP_C(pkt)) {
6833 					case STATUS_GOOD:
6834 						/* Already failed over */
6835 						activation_done = 1;
6836 						break;
6837 					case STATUS_RESERVATION_CONFLICT:
6838 						reserve_pending = 1;
6839 						break;
6840 					case STATUS_CHECK:
6841 						check_condition = 1;
6842 						break;
6843 					}
6844 				}
6845 			}
6846 			if (check_condition &&
6847 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
6848 				uint8_t *sns, skey, asc, ascq;
6849 				sns = (uint8_t *)
6850 				    &(((struct scsi_arq_status *)(uintptr_t)
6851 				    (pkt->pkt_scbp))->sts_sensedata);
6852 				skey = scsi_sense_key(sns);
6853 				asc = scsi_sense_asc(sns);
6854 				ascq = scsi_sense_ascq(sns);
6855 				if (skey == KEY_UNIT_ATTENTION &&
6856 				    asc == 0x29) {
6857 					/* Already failed over */
6858 					VHCI_DEBUG(1, (CE_NOTE, NULL,
6859 					    "!vhci_failover(7)(%s): "
6860 					    "path 0x%p POR UA condition\n",
6861 					    guid, (void *)npip));
6862 					if (UA_condition == 0) {
6863 						UA_condition = 1;
6864 						goto check_path_again;
6865 					}
6866 				} else {
6867 					activation_done = 0;
6868 					VHCI_DEBUG(1, (CE_NOTE, NULL,
6869 					    "!vhci_failover(%s): path 0x%p "
6870 					    "unhandled chkcond %x %x %x\n",
6871 					    guid, (void *)npip, skey,
6872 					    asc, ascq));
6873 				}
6874 			}
6875 			scsi_destroy_pkt(pkt);
6876 		}
6877 		scsi_free_consistent_buf(bp);
6878 
6879 		if (activation_done) {
6880 			mdi_rele_path(npip);
6881 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
6882 			    "path 0x%p already failedover\n", guid,
6883 			    (void *)npip));
6884 			break;
6885 		}
6886 		if (reserve_pending && (vlun->svl_xlf_capable == 0)) {
6887 			(void) vhci_recovery_reset(vlun,
6888 			    &svp->svp_psd->sd_address,
6889 			    FALSE, VHCI_DEPTH_ALL);
6890 		}
6891 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): "
6892 		    "activating path 0x%p(psd:%p)\n", guid, (void *)npip,
6893 		    (void *)svp->svp_psd));
6894 		if (sfo->sfo_path_activate(svp->svp_psd, pclass2,
6895 		    vlun->svl_fops_ctpriv) == 0) {
6896 			activation_done = 1;
6897 			mdi_rele_path(npip);
6898 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
6899 			    "path 0x%p successfully activated\n", guid,
6900 			    (void *)npip));
6901 			break;
6902 		}
6903 		pip = npip;
6904 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
6905 		    pip, &npip);
6906 		mdi_rele_path(pip);
6907 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
6908 	if (activation_done == 0) {
6909 		pclass1 = pclass2;
6910 		goto next_pathclass;
6911 	}
6912 
6913 	/*
	 * If we are here, we have succeeded in activating path npip of
	 * pathclass pclass2; let us validate all paths of pclass2 by
	 * "ping"-ing each one and mark the good ones ONLINE.  Also, set
	 * the state of the paths belonging to the previously active
	 * pathclass to STANDBY.
6919 	 */
6920 	pip = npip = NULL;
6921 	sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
6922 	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
6923 	    NULL, &npip);
6924 	if (npip == NULL || sps != MDI_SUCCESS) {
6925 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for "
6926 		    "device %s (GUID %s): paths may be busy\n",
6927 		    ddi_node_name(cdip), guid));
6928 		goto done;
6929 	}
6930 	do {
6931 		(void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state);
6932 		if (mdi_prop_lookup_string(npip, "path-class", &pclass)
6933 		    != MDI_SUCCESS) {
6934 			pip = npip;
6935 			sps = mdi_select_path(cdip, NULL,
6936 			    (MDI_SELECT_ONLINE_PATH |
6937 			    MDI_SELECT_STANDBY_PATH |
6938 			    MDI_SELECT_USER_DISABLE_PATH),
6939 			    pip, &npip);
6940 			mdi_rele_path(pip);
6941 			continue;
6942 		}
6943 		if (strcmp(pclass, pclass2) == 0) {
6944 			if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
6945 				svp = (scsi_vhci_priv_t *)
6946 				    mdi_pi_get_vhci_private(npip);
6947 				VHCI_DEBUG(1, (CE_NOTE, NULL,
6948 				    "!vhci_failover(8)(%s): "
6949 				    "pinging path 0x%p\n",
6950 				    guid, (void *)npip));
6951 				if (sfo->sfo_path_ping(svp->svp_psd,
6952 				    vlun->svl_fops_ctpriv) == 1) {
6953 					mdi_pi_set_state(npip,
6954 					    MDI_PATHINFO_STATE_ONLINE);
6955 					VHCI_DEBUG(1, (CE_NOTE, NULL,
6956 					    "!vhci_failover(9)(%s): "
6957 					    "path 0x%p ping successful, "
6958 					    "marked online\n", guid,
6959 					    (void *)npip));
6960 					MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO);
6961 				}
6962 			}
6963 		} else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass)
6964 		    == 0)) {
6965 			if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
6966 				mdi_pi_set_state(npip,
6967 				    MDI_PATHINFO_STATE_STANDBY);
6968 				VHCI_DEBUG(1, (CE_NOTE, NULL,
6969 				    "!vhci_failover(10)(%s): path 0x%p marked "
6970 				    "STANDBY\n", guid, (void *)npip));
6971 				MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM);
6972 			}
6973 		}
6974 		(void) mdi_prop_free(pclass);
6975 		pip = npip;
6976 		sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
6977 		    MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH),
6978 		    pip, &npip);
6979 		mdi_rele_path(pip);
6980 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
6981 
6982 	/*
6983 	 * Update the AccessState of related MP-API TPGs
6984 	 */
6985 	(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
6986 
6987 	vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully "
6988 	    "for device %s (GUID %s): failed over from %s to %s",
6989 	    ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" :
6990 	    s_pclass), pclass2);
6991 	ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP);
6992 	(void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1));
6993 	mutex_enter(&vlun->svl_mutex);
6994 	ptr2 = vlun->svl_active_pclass;
6995 	vlun->svl_active_pclass = ptr1;
6996 	mutex_exit(&vlun->svl_mutex);
6997 	if (ptr2) {
6998 		kmem_free(ptr2, strlen(ptr2)+1);
6999 	}
7000 	mutex_enter(&vhci->vhci_mutex);
7001 	scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
7002 	    &vhci->vhci_reset_notify_listf);
7003 	/* All reservations are cleared upon these resets. */
7004 	vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
7005 	mutex_exit(&vhci->vhci_mutex);
7006 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active "
7007 	    "pathclass for %s is now %s\n", guid, pclass2));
7008 	retval = MDI_SUCCESS;
7009 
7010 done:
7011 	if (flags == MDI_FAILOVER_ASYNC) {
7012 		VHCI_RELEASE_LUN(vlun);
7013 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7014 		    "releasing lun, as failover was ASYNC\n"));
7015 	} else {
7016 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7017 		    "NOT releasing lun, as failover was SYNC\n"));
7018 	}
7019 	return (retval);
7020 }
7021 
7022 /*
7023  * vhci_client_attached is called after the successful attach of a
7024  * client devinfo node.
7025  */
7026 static void
7027 vhci_client_attached(dev_info_t *cdip)
7028 {
7029 	mdi_pathinfo_t	*pip;
7030 	int		circular;
7031 
7032 	/*
7033 	 * At this point the client has attached and its instance number is
7034 	 * valid, so we can set up kstats.  We need to do this here because it
7035 	 * is possible for paths to go online prior to client attach, in which
7036 	 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
7037 	 * was a noop.
7038 	 */
7039 	ndi_devi_enter(cdip, &circular);
7040 	for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
7041 	    pip = mdi_get_next_phci_path(cdip, pip))
7042 		vhci_kstat_create_pathinfo(pip);
7043 	ndi_devi_exit(cdip, circular);
7044 }
7045 
7046 /*
7047  * quiesce all of the online paths
7048  */
7049 static int
7050 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
7051 	char *guid, char *active_pclass_ptr)
7052 {
7053 	scsi_vhci_priv_t	*svp;
7054 	char			*s_pclass = NULL;
7055 	mdi_pathinfo_t		*npip, *pip;
7056 	int			sps;
7057 
7058 	/* quiesce currently active paths */
7059 	s_pclass = NULL;
7060 	pip = npip = NULL;
7061 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
7062 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
7063 		return (1);
7064 	}
7065 	do {
7066 		if (mdi_prop_lookup_string(npip, "path-class",
7067 		    &s_pclass) != MDI_SUCCESS) {
7068 			mdi_rele_path(npip);
7069 			vhci_log(CE_NOTE, vdip, "!Failover operation failed "
7070 			    "for device %s (GUID %s) due to an internal "
7071 			    "error", ddi_node_name(cdip), guid);
7072 			return (1);
7073 		}
7074 		if (strcmp(s_pclass, active_pclass_ptr) == 0) {
7075 			/*
7076 			 * Quiesce the path.  Free s_pclass since
7077 			 * we no longer need it.
7078 			 */
7079 			VHCI_DEBUG(1, (CE_NOTE, NULL,
7080 			    "!vhci_failover(2)(%s): failing over "
7081 			    "from %s; quiescing path %p\n",
7082 			    guid, s_pclass, (void *)npip));
7083 			(void) mdi_prop_free(s_pclass);
7084 			svp = (scsi_vhci_priv_t *)
7085 			    mdi_pi_get_vhci_private(npip);
7086 			if (svp == NULL) {
7087 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7088 				    "!vhci_failover(2.5)(%s): no "
7089 				    "client priv! %p offlined?\n",
7090 				    guid, (void *)npip));
7091 				pip = npip;
7092 				sps = mdi_select_path(cdip, NULL,
7093 				    MDI_SELECT_ONLINE_PATH, pip, &npip);
7094 				mdi_rele_path(pip);
7095 				continue;
7096 			}
7097 			if (scsi_abort(&svp->svp_psd->sd_address, NULL)
7098 			    == 0) {
7099 				(void) vhci_recovery_reset(vlun,
7100 				    &svp->svp_psd->sd_address, FALSE,
7101 				    VHCI_DEPTH_TARGET);
7102 			}
7103 			mutex_enter(&svp->svp_mutex);
7104 			if (svp->svp_cmds == 0) {
7105 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7106 				    "!vhci_failover(3)(%s):"
7107 				    "quiesced path %p\n", guid, (void *)npip));
7108 			} else {
7109 				while (svp->svp_cmds != 0) {
7110 					cv_wait(&svp->svp_cv, &svp->svp_mutex);
7111 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7112 					    "!vhci_failover(3.cv)(%s):"
7113 					    "quiesced path %p\n", guid,
7114 					    (void *)npip));
7115 				}
7116 			}
7117 			mutex_exit(&svp->svp_mutex);
7118 		} else {
7119 			/*
7120 			 * make sure we free up the memory
7121 			 */
7122 			(void) mdi_prop_free(s_pclass);
7123 		}
7124 		pip = npip;
7125 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH,
7126 		    pip, &npip);
7127 		mdi_rele_path(pip);
7128 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7129 	return (0);
7130 }
7131 
7132 static struct scsi_vhci_lun *
7133 vhci_lun_lookup(dev_info_t *tgt_dip)
7134 {
7135 	return ((struct scsi_vhci_lun *)
7136 	    mdi_client_get_vhci_private(tgt_dip));
7137 }
7138 
7139 static struct scsi_vhci_lun *
7140 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc)
7141 {
7142 	struct scsi_vhci_lun *svl;
7143 
7144 	if ((svl = vhci_lun_lookup(tgt_dip)) != NULL) {
7145 		return (svl);
7146 	}
7147 
7148 	svl = kmem_zalloc(sizeof (*svl), KM_SLEEP);
7149 	svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP);
7150 	(void) strcpy(svl->svl_lun_wwn, guid);
7151 	mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL);
7152 	cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL);
7153 	sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL);
7154 	svl->svl_waiting_for_activepath = 1;
7155 	svl->svl_sector_size = 1;
7156 	mdi_client_set_vhci_private(tgt_dip, svl);
7157 	*didalloc = 1;
7158 	VHCI_DEBUG(1, (CE_NOTE, NULL,
7159 	    "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
7160 	    guid, (void *)svl));
7161 	return (svl);
7162 }
7163 
7164 static void
7165 vhci_lun_free(dev_info_t *tgt_dip)
7166 {
7167 	struct scsi_vhci_lun *dvlp;
7168 	char *guid;
7169 	struct scsi_device *sd;
7170 
7171 	/*
7172 	 * The scsi_device was saved as the driver private data during
7173 	 * child node initialization by scsi_hba_bus_ctl().
7174 	 */
7175 	sd = (struct scsi_device *)ddi_get_driver_private(tgt_dip);
7176 
7177 	dvlp = (struct scsi_vhci_lun *)
7178 	    mdi_client_get_vhci_private(tgt_dip);
7179 	ASSERT(dvlp != NULL);
7180 
7181 	mdi_client_set_vhci_private(tgt_dip, NULL);
7182 
7183 	guid = dvlp->svl_lun_wwn;
7184 	ASSERT(guid != NULL);
7185 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid));
7186 
7187 	mutex_enter(&dvlp->svl_mutex);
7188 	if (dvlp->svl_active_pclass != NULL) {
7189 		kmem_free(dvlp->svl_active_pclass,
7190 		    strlen(dvlp->svl_active_pclass)+1);
7191 	}
7192 	dvlp->svl_active_pclass = NULL;
7193 	mutex_exit(&dvlp->svl_mutex);
7194 
7195 	if (dvlp->svl_lun_wwn != NULL) {
7196 		kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1);
7197 	}
7198 	dvlp->svl_lun_wwn = NULL;
7199 
7200 	if (dvlp->svl_fops_name) {
7201 		kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1);
7202 	}
7203 	dvlp->svl_fops_name = NULL;
7204 
7205 	if (dvlp->svl_fops_ctpriv != NULL &&
7206 	    dvlp->svl_fops != NULL) {
7207 		dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv);
7208 	}
7209 
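	/* destroy the per-LUN taskq only if one was created */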
7210 	if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG)
7211 		taskq_destroy(dvlp->svl_taskq);
7212 
7213 	mutex_destroy(&dvlp->svl_mutex);
7214 	cv_destroy(&dvlp->svl_cv);
7215 	sema_destroy(&dvlp->svl_pgr_sema);
7216 	kmem_free(dvlp, sizeof (*dvlp));
7217 	/*
7218 	 * vhci_lun_free may be called before tgt_dip initialization
7219 	 * completes, so check whether sd is NULL.
7220 	 */
7221 	if (sd != NULL)
7222 		scsi_device_hba_private_set(sd, NULL);
7223 }
7224 
7225 int
7226 vhci_do_scsi_cmd(struct scsi_pkt *pkt)
7227 {
7228 	int	err = 0;
7229 	int	retry_cnt = 0;
7230 	uint8_t	*sns, skey;
7231 
7232 #ifdef DEBUG
7233 	if (vhci_debug > 5) {
7234 		vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip,
7235 		    CE_WARN, "Vhci command", pkt->pkt_cdbp);
7236 	}
7237 #endif
7238 
7239 retry:
7240 	err = scsi_poll(pkt);
7241 	if (err) {
7242 		if (pkt->pkt_cdbp[0] == SCMD_RELEASE) {
7243 			if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) {
7244 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7245 				    "!v_s_do_s_c: RELEASE conflict\n"));
7246 				return (0);
7247 			}
7248 		}
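		/* retry a failed scsi_poll() up to three times */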
7249 		if (retry_cnt++ < 3) {
7250 			VHCI_DEBUG(1, (CE_WARN, NULL,
7251 			    "!v_s_do_s_c:retry packet 0x%p "
7252 			    "status 0x%x reason %s",
7253 			    (void *)pkt, SCBP_C(pkt),
7254 			    scsi_rname(pkt->pkt_reason)));
7255 			if ((pkt->pkt_reason == CMD_CMPLT) &&
7256 			    (SCBP_C(pkt) == STATUS_CHECK) &&
7257 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
7258 				sns = (uint8_t *)
7259 				    &(((struct scsi_arq_status *)(uintptr_t)
7260 				    (pkt->pkt_scbp))->sts_sensedata);
7261 				skey = scsi_sense_key(sns);
7262 				VHCI_DEBUG(1, (CE_WARN, NULL,
7263 				    "!v_s_do_s_c:retry "
7264 				    "packet 0x%p  sense data %s", (void *)pkt,
7265 				    scsi_sname(skey)));
7266 			}
7267 			goto retry;
7268 		}
7269 		VHCI_DEBUG(1, (CE_WARN, NULL,
7270 		    "!v_s_do_s_c: failed transport 0x%p 0x%x",
7271 		    (void *)pkt, SCBP_C(pkt)));
7272 		return (0);
7273 	}
7274 
7275 	switch (pkt->pkt_reason) {
7276 		case CMD_TIMEOUT:
7277 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed "
7278 			    "out (pkt 0x%p)", (void *)pkt));
7279 			return (0);
7280 		case CMD_CMPLT:
7281 			switch (SCBP_C(pkt)) {
7282 				case STATUS_GOOD:
7283 					break;
7284 				case STATUS_CHECK:
7285 					if (pkt->pkt_state & STATE_ARQ_DONE) {
7286 						sns = (uint8_t *)&(((
7287 						    struct scsi_arq_status *)
7288 						    (uintptr_t)
7289 						    (pkt->pkt_scbp))->
7290 						    sts_sensedata);
7291 						skey = scsi_sense_key(sns);
7292 						if ((skey ==
7293 						    KEY_UNIT_ATTENTION) ||
7294 						    (skey ==
7295 						    KEY_NOT_READY)) {
7296 							/*
7297 							 * clear unit attn.
7298 							 */
7299 
7300 							VHCI_DEBUG(1,
7301 							    (CE_WARN, NULL,
7302 							    "!v_s_do_s_c: "
7303 							    "retry "
7304 							    "packet 0x%p sense "
7305 							    "data %s",
7306 							    (void *)pkt,
7307 							    scsi_sname
7308 							    (skey)));
7309 							goto retry;
7310 						}
7311 						VHCI_DEBUG(4, (CE_WARN, NULL,
7312 						    "!ARQ while "
7313 						    "transporting "
7314 						    "(pkt 0x%p)",
7315 						    (void *)pkt));
7316 						return (0);
7317 					}
7318 					return (0);
7319 				default:
7320 					VHCI_DEBUG(1, (CE_WARN, NULL,
7321 					    "!Bad status returned "
7322 					    "(pkt 0x%p, status %x)",
7323 					    (void *)pkt, SCBP_C(pkt)));
7324 					return (0);
7325 			}
7326 			break;
7327 		case CMD_INCOMPLETE:
7328 		case CMD_RESET:
7329 		case CMD_ABORTED:
7330 		case CMD_TRAN_ERR:
7331 			if (retry_cnt++ < 1) {
7332 				VHCI_DEBUG(1, (CE_WARN, NULL,
7333 				    "!v_s_do_s_c: retry packet 0x%p %s",
7334 				    (void *)pkt, scsi_rname(pkt->pkt_reason)));
7335 				goto retry;
7336 			}
7337 			/* FALLTHROUGH */
7338 		default:
7339 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not "
7340 			    "complete successfully (pkt 0x%p,"
7341 			    "reason %x)", (void *)pkt, pkt->pkt_reason));
7342 			return (0);
7343 	}
7344 	return (1);
7345 }
7346 
7347 static int
7348 vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
7349 {
7350 	mdi_pathinfo_t		*pip, *spip;
7351 	dev_info_t		*cdip;
7352 	struct scsi_vhci_priv	*svp;
7353 	mdi_pathinfo_state_t	pstate;
7354 	uint32_t		p_ext_state;
7355 	int			circular;
7356 
7357 	cdip = vlun->svl_dip;
7358 	pip = spip = NULL;
7359 	ndi_devi_enter(cdip, &circular);
7360 	pip = mdi_get_next_phci_path(cdip, NULL);
7361 	while (pip != NULL) {
7362 		(void) mdi_pi_get_state2(pip, &pstate, &p_ext_state);
7363 		if (pstate != MDI_PATHINFO_STATE_ONLINE) {
7364 			spip = pip;
7365 			pip = mdi_get_next_phci_path(cdip, spip);
7366 			continue;
7367 		}
7368 		mdi_hold_path(pip);
7369 		ndi_devi_exit(cdip, circular);
7370 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7371 		mutex_enter(&svp->svp_mutex);
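		/* wait up to vhci_path_quiesce_timeout seconds to drain I/O */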
7372 		while (svp->svp_cmds != 0) {
7373 			if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
7374 			    drv_usectohz(vhci_path_quiesce_timeout * 1000000),
7375 			    TR_CLOCK_TICK) == -1) {
7376 				mutex_exit(&svp->svp_mutex);
7377 				mdi_rele_path(pip);
7378 				VHCI_DEBUG(1, (CE_WARN, NULL,
7379 				    "Quiesce of lun is not successful "
7380 				    "vlun: 0x%p.", (void *)vlun));
7381 				return (0);
7382 			}
7383 		}
7384 		mutex_exit(&svp->svp_mutex);
7385 		ndi_devi_enter(cdip, &circular);
7386 		spip = pip;
7387 		pip = mdi_get_next_phci_path(cdip, spip);
7388 		mdi_rele_path(spip);
7389 	}
7390 	ndi_devi_exit(cdip, circular);
7391 	return (1);
7392 }
7393 
7394 static int
7395 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp)
7396 {
7397 	scsi_vhci_lun_t		*vlun;
7398 	vhci_prout_t		*prout;
7399 	int			rval, success;
7400 	mdi_pathinfo_t		*pip, *npip;
7401 	scsi_vhci_priv_t	*osvp;
7402 	dev_info_t		*cdip;
7403 	uchar_t			cdb_1;
7404 	uchar_t			temp_res_key[MHIOC_RESV_KEY_SIZE];
7405 
7406 
7407 	/*
7408 	 * see if there are any other paths available; if none,
7409 	 * then there is nothing to do.
7410 	 */
7411 	cdip = svp->svp_svl->svl_dip;
7412 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7413 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7414 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7415 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7416 		    "%s%d: vhci_pgr_validate_and_register: first path\n",
7417 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7418 		return (1);
7419 	}
7420 
7421 	vlun = svp->svp_svl;
7422 	prout = &vlun->svl_prout;
7423 	ASSERT(vlun->svl_pgr_active != 0);
7424 
7425 	/*
7426 	 * While the path was busy/offlined, some other host might have
7427 	 * cleared this key. Validate the key on some other path first.
7428 	 * If it fails, return failure.
7429 	 */
7430 
7431 	npip = pip;
7432 	pip = NULL;
7433 	success = 0;
7434 
7435 	/* Save the res key */
7436 	bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE);
7437 
7438 	/*
7439 	 * The cdb from the application may be a Register_And_Ignore, which
7440 	 * would force registration instead of validating the key.
7441 	 * Convert it to a normal REGISTER cdb for validation, and be sure
7442 	 * to restore the cdb afterwards.
7443 	 */
7444 	cdb_1 = vlun->svl_cdb[1];
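	/*
	 * Byte 1, bits 4:0 of a PERSISTENT RESERVE OUT cdb hold the
	 * service action; masking with 0xe0 clears it to REGISTER (0h).
	 */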
7445 	vlun->svl_cdb[1] &= 0xe0;
7446 
7447 	do {
7448 		osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
7449 		if (osvp == NULL) {
7450 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7451 			    "vhci_pgr_validate_and_register: no "
7452 			    "client priv! 0x%p offlined?\n",
7453 			    (void *)npip));
7454 			goto next_path_1;
7455 		}
7456 
7457 		if (osvp == svp) {
7458 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7459 			    "vhci_pgr_validate_and_register: same svp 0x%p"
7460 			    " npip 0x%p vlun 0x%p\n",
7461 			    (void *)svp, (void *)npip, (void *)vlun));
7462 			goto next_path_1;
7463 		}
7464 
7465 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7466 		    "vhci_pgr_validate_and_register: First validate on"
7467 		    " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
7468 		    " cdb1 %x\n", (void *)osvp, (void *)vlun,
7469 		    (void *)curthread, vlun->svl_cdb[1]));
7470 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:");
7471 
7472 		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7473 
7474 		VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy",
7475 		    (void *)vlun));
7476 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7477 
7478 		rval = vhci_do_prout(osvp);
7479 		if (rval == 1) {
7480 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7481 			    "%s%d: vhci_pgr_validate_and_register: key"
7482 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7483 			    ddi_get_instance(cdip), (void *)curthread));
7484 			pip = npip;
7485 			success = 1;
7486 			break;
7487 		} else {
7488 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7489 			    "vhci_pgr_validate_and_register: First validation"
7490 			    " on osvp 0x%p failed %x\n", (void *)osvp, rval));
7491 			vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:");
7492 		}
7493 
7494 		/*
7495 		 * Try other paths
7496 		 */
7497 next_path_1:
7498 		pip = npip;
7499 		rval = mdi_select_path(cdip, NULL,
7500 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7501 		    pip, &npip);
7502 		mdi_rele_path(pip);
7503 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7504 
7505 
7506 	/* Be sure to restore original cdb */
7507 	vlun->svl_cdb[1] = cdb_1;
7508 
7509 	/* Restore the res_key */
7510 	bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7511 
7512 	/*
7513 	 * If the key could not be registered on any path the first time
7514 	 * around, return success, as the online should still continue.
7515 	 */
7516 	if (success == 0) {
7517 		return (1);
7518 	}
7519 
7520 	ASSERT(pip != NULL);
7521 
7522 	/*
7523 	 * Force register on new path
7524 	 */
7525 	cdb_1 = vlun->svl_cdb[1];		/* store the cdb */
7526 
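	/*
	 * REGISTER AND IGNORE EXISTING KEY succeeds regardless of any
	 * key currently registered via this path.
	 */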
7527 	vlun->svl_cdb[1] &= 0xe0;
7528 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
7529 
7530 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: ");
7531 
7532 	bcopy(prout->active_service_key, prout->service_key,
7533 	    MHIOC_RESV_KEY_SIZE);
7534 	bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7535 
7536 	vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: ");
7537 
7538 	rval = vhci_do_prout(svp);
7539 	vlun->svl_cdb[1] = cdb_1;		/* restore the cdb */
7540 	if (rval != 1) {
7541 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7542 		    "vhci_pgr_validate_and_register: register on new"
7543 		    " path 0x%p svp 0x%p failed %x\n",
7544 		    (void *)pip, (void *)svp, rval));
7545 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: ");
7546 		mdi_rele_path(pip);
7547 		return (0);
7548 	}
7549 
7550 	if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) {
7551 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7552 		    "vhci_pgr_validate_and_register: zero service key\n"));
7553 		mdi_rele_path(pip);
7554 		return (rval);
7555 	}
7556 
7557 	/*
7558 	 * While the key was being force registered, some other host might
7559 	 * have cleared it.  Re-validate the key on another pre-existing path
7560 	 * before declaring success.
7561 	 */
7562 	npip = pip;
7563 	pip = NULL;
7564 
7565 	/*
7566 	 * As above, the cdb from the application may be a Register_And_Ignore,
7567 	 * which would force registration instead of validating the key.
7568 	 * Convert it to a normal REGISTER cdb for validation, and be sure
7569 	 * to restore the cdb afterwards.
7570 	 */
7571 	cdb_1 = vlun->svl_cdb[1];
7572 	vlun->svl_cdb[1] &= 0xe0;
7573 	success = 0;
7574 
7575 	do {
7576 		osvp = (scsi_vhci_priv_t *)
7577 		    mdi_pi_get_vhci_private(npip);
7578 		if (osvp == NULL) {
7579 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7580 			    "vhci_pgr_validate_and_register: no "
7581 			    "client priv! 0x%p offlined?\n",
7582 			    (void *)npip));
7583 			goto next_path_2;
7584 		}
7585 
7586 		if (osvp == svp) {
7587 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7588 			    "vhci_pgr_validate_and_register: same osvp 0x%p"
7589 			    " npip 0x%p vlun 0x%p\n",
7590 			    (void *)svp, (void *)npip, (void *)vlun));
7591 			goto next_path_2;
7592 		}
7593 
7594 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7595 		    "vhci_pgr_validate_and_register: Re-validation on"
7596 		    " osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
7597 		    (void *)osvp, (void *)vlun, vlun->svl_cdb[1]));
7598 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7599 
7600 		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7601 
7602 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7603 
7604 		rval = vhci_do_prout(osvp);
7605 		if (rval == 1) {
7606 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7607 			    "%s%d: vhci_pgr_validate_and_register: key"
7608 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7609 			    ddi_get_instance(cdip), (void *)curthread));
7610 			pip = npip;
7611 			success = 1;
7612 			break;
7613 		} else {
7614 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7615 			    "vhci_pgr_validate_and_register: Re-validation on"
7616 			    " osvp 0x%p failed %x\n", (void *)osvp, rval));
7617 			vhci_print_prout_keys(vlun,
7618 			    "v_pgr_val_reg: reval failed: ");
7619 		}
7620 
7621 		/*
7622 		 * Try other paths
7623 		 */
7624 next_path_2:
7625 		pip = npip;
7626 		rval = mdi_select_path(cdip, NULL,
7627 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7628 		    pip, &npip);
7629 		mdi_rele_path(pip);
7630 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7631 
7632 	/* Be sure to restore original cdb */
7633 	vlun->svl_cdb[1] = cdb_1;
7634 
7635 	if (success == 1) {
7636 		/* Successfully validated registration */
7637 		mdi_rele_path(pip);
7638 		return (1);
7639 	}
7640 
7641 	VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed"));
7642 
7643 	/*
7644 	 * The key is invalid; back out by registering a key value of 0.
7645 	 */
7646 	VHCI_DEBUG(4, (CE_NOTE, NULL,
7647 	    "vhci_pgr_validate_and_register: backout on"
7648 	    " svp 0x%p being done\n", (void *)svp));
7649 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7650 
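	/* registering a zero service key removes our registration */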
7651 	bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7652 	bzero(prout->service_key, MHIOC_RESV_KEY_SIZE);
7653 
7654 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7655 
7656 	/*
7657 	 * Get a new path
7658 	 */
7659 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7660 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7661 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7662 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7663 		    "%s%d: vhci_pgr_validate_and_register: no valid pip\n",
7664 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7665 		return (0);
7666 	}
7667 
7668 	if ((rval = vhci_do_prout(svp)) != 1) {
7669 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7670 		    "vhci_pgr_validate_and_register: backout on"
7671 		    " svp 0x%p failed\n", (void *)svp));
7672 		vhci_print_prout_keys(vlun, "backout failed");
7673 
7674 		VHCI_DEBUG(4, (CE_WARN, NULL,
7675 		    "%s%d: vhci_pgr_validate_and_register: key"
7676 		    " validation and backout failed", ddi_driver_name(cdip),
7677 		    ddi_get_instance(cdip)));
7678 		if (rval == VHCI_PGR_ILLEGALOP) {
7679 			VHCI_DEBUG(4, (CE_WARN, NULL,
7680 			    "%s%d: vhci_pgr_validate_and_register: key"
7681 			    " already cleared", ddi_driver_name(cdip),
7682 			    ddi_get_instance(cdip)));
7683 			rval = 1;
7684 		} else
7685 			rval = 0;
7686 	} else {
7687 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7688 		    "%s%d: vhci_pgr_validate_and_register: key"
7689 		    " validation failed, key backed out\n",
7690 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7691 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
7692 	}
7693 	mdi_rele_path(pip);
7694 
7695 	return (rval);
7696 }
7697 
7698 /*
7699  * taskq routine to dispatch a scsi cmd to vhci_scsi_start.  This ensures
7700  * that vhci_scsi_start is not called in interrupt context.
7701  * Because the upper layer received TRAN_ACCEPT when the command was
7702  * dispatched, we must complete the command here if anything goes wrong.
7703  */
7704 static void
7705 vhci_dispatch_scsi_start(void *arg)
7706 {
7707 	struct vhci_pkt *vpkt	= (struct vhci_pkt *)arg;
7708 	struct scsi_pkt *tpkt	= vpkt->vpkt_tgt_pkt;
7709 	int rval		= TRAN_BUSY;
7710 
7711 	VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
7712 	    " scsi-2 reserve for 0x%p\n",
7713 	    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7714 
7715 	/*
7716 	 * To prevent the taskq from being called recursively, we set the
7717 	 * VHCI_PKT_THRU_TASKQ bit in vpkt_state.
7718 	 */
7719 	vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;
7720 
7721 	/*
7722 	 * Wait for the transport to become ready to send packets; if it
7723 	 * times out, it returns something other than TRAN_BUSY.
7724 	 * vhci_reserve_delay may need tuning for other transports and is
7725 	 * therefore a global.  Using delay() is safe here because this
7726 	 * routine is called from taskq dispatch and not from interrupt
7727 	 * context.
7728 	 */
7729 	while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
7730 	    vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
7731 		delay(drv_usectohz(vhci_reserve_delay));
7732 	}
7733 
7734 	switch (rval) {
7735 	case TRAN_ACCEPT:
7736 		return;
7737 
7738 	default:
7739 		/*
7740 		 * This pkt shall be retried, and to ensure another taskq
7741 		 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
7742 		 * flag.
7743 		 */
7744 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
7745 
7746 		/* Ensure that the pkt is retried without a reset */
7747 		tpkt->pkt_reason = CMD_ABORTED;
7748 		tpkt->pkt_statistics |= STAT_ABORTED;
7749 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
7750 		    "TRAN_rval %d returned for dip 0x%p", rval,
7751 		    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7752 		break;
7753 	}
7754 
7755 	/*
7756 	 * vpkt_org_vpkt should always be NULL here if the retry command
7757 	 * has been successfully dispatched.  If vpkt_org_vpkt != NULL at
7758 	 * this point, it is an error so restore the original vpkt and
7759 	 * return an error to the target driver so it can retry the
7760 	 * command as appropriate.
7761 	 */
7762 	if (vpkt->vpkt_org_vpkt != NULL) {
7763 		struct vhci_pkt		*new_vpkt = vpkt;
7764 		scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
7765 		    mdi_pi_get_vhci_private(vpkt->vpkt_path);
7766 
7767 		vpkt = vpkt->vpkt_org_vpkt;
7768 
7769 		vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
7770 		vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;
7771 
7772 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
7773 		    new_vpkt->vpkt_tgt_pkt);
7774 
7775 		tpkt = vpkt->vpkt_tgt_pkt;
7776 	}
7777 
7778 	scsi_hba_pkt_comp(tpkt);
7779 }
7780 
7781 static void
7782 vhci_initiate_auto_failback(void *arg)
7783 {
7784 	struct scsi_vhci_lun	*vlun = (struct scsi_vhci_lun *)arg;
7785 	dev_info_t		*vdip, *cdip;
7786 	int			held;
7787 
7788 	cdip = vlun->svl_dip;
7789 	vdip = ddi_get_parent(cdip);
7790 
7791 	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
7792 
7793 	/*
7794 	 * Perform a final check to see if the active path class is indeed
7795 	 * not the preferred path class.  In the time since the auto
7796 	 * failback was dispatched, an external failover could have been
7797 	 * detected [some other host could have detected this condition and
7798 	 * triggered the auto failback earlier].
7799 	 * In such a case, going ahead with the failover would negate the
7800 	 * whole purpose of auto failback.
7801 	 */
7802 	mutex_enter(&vlun->svl_mutex);
7803 	if (vlun->svl_active_pclass != NULL) {
7804 		char				*best_pclass;
7805 		struct scsi_failover_ops	*fo;
7806 
7807 		fo = vlun->svl_fops;
7808 
7809 		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
7810 		    vlun->svl_fops_ctpriv);
7811 		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
7812 			mutex_exit(&vlun->svl_mutex);
7813 			VHCI_RELEASE_LUN(vlun);
7814 			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
7815 			    "auto failback for %s as %s pathclass already "
7816 			    "active.\n", vlun->svl_lun_wwn, best_pclass));
7817 			return;
7818 		}
7819 	}
7820 	mutex_exit(&vlun->svl_mutex);
7821 	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
7822 	    == MDI_SUCCESS) {
7823 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7824 		    "succeeded for device %s (GUID %s)",
7825 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7826 	} else {
7827 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7828 		    "failed for device %s (GUID %s)",
7829 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7830 	}
7831 	VHCI_RELEASE_LUN(vlun);
7832 }
7833 
7834 #ifdef DEBUG
7835 static void
7836 vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
7837 {
7838 	vhci_clean_print(NULL, 5, "Current PGR Keys",
7839 	    (uchar_t *)prin, numkeys * 8);
7840 }
7841 #endif
7842 
7843 static void
7844 vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
7845 {
7846 	int			i;
7847 	vhci_prout_t		*prout;
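	/* each key byte is printed as "[xx]", i.e. four characters */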
7848 	char			buf1[4*MHIOC_RESV_KEY_SIZE + 1];
7849 	char			buf2[4*MHIOC_RESV_KEY_SIZE + 1];
7850 	char			buf3[4*MHIOC_RESV_KEY_SIZE + 1];
7851 	char			buf4[4*MHIOC_RESV_KEY_SIZE + 1];
7852 
7853 	prout = &vlun->svl_prout;
7854 
7855 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7856 		(void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]);
7857 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7858 		(void) sprintf(&buf2[(4*i)], "[%02x]", prout->service_key[i]);
7859 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7860 		(void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]);
7861 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7862 		(void) sprintf(&buf4[4*i], "[%02x]",
7863 		    prout->active_service_key[i]);
7864 
7865 	/* Print everything in one go; otherwise the output may interleave */
7866 	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
7867 	    "res_key:          : %s\n"
7868 	    "service_key       : %s\n"
7869 	    "active_res_key    : %s\n"
7870 	    "active_service_key: %s\n",
7871 	    msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
7872 }
7873 
7874 /*
7875  * Called from vhci_scsi_start to update the pHCI pkt with target packet.
7876  */
7877 static void
7878 vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
7879 {
7880 
7881 	ASSERT(vpkt->vpkt_hba_pkt);
7882 
7883 	vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
7884 	vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;
7885 
7886 	if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
7887 	    MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
7888 		/*
7889 		 * A polled command was requested or the HBA is in the
7890 		 * suspended state.
7891 		 */
7892 		vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
7893 		vpkt->vpkt_hba_pkt->pkt_comp = NULL;
7894 	} else {
7895 		vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
7896 	}
7897 	vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
7898 	bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
7899 	    vpkt->vpkt_tgt_init_cdblen);
7900 	vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;
7901 
7902 	/* Re-initialize the following pHCI packet state information */
7903 	vpkt->vpkt_hba_pkt->pkt_state = 0;
7904 	vpkt->vpkt_hba_pkt->pkt_statistics = 0;
7905 	vpkt->vpkt_hba_pkt->pkt_reason = 0;
7906 }
7907 
7908 static int
7909 vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
7910     void *arg, void *result)
7911 {
7912 	int ret = DDI_SUCCESS;
7913 
7914 	/*
7915 	 * Generic processing in MPxIO framework
7916 	 */
7917 	ret = mdi_bus_power(parent, impl_arg, op, arg, result);
7918 
7919 	switch (ret) {
7920 	case MDI_SUCCESS:
7921 		ret = DDI_SUCCESS;
7922 		break;
7923 	case MDI_FAILURE:
7924 		ret = DDI_FAILURE;
7925 		break;
7926 	default:
7927 		break;
7928 	}
7929 
7930 	return (ret);
7931 }
7932 
7933 static int
7934 vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
7935     mdi_pathinfo_t *pip)
7936 {
7937 	dev_info_t		*cdip;
7938 	mdi_pathinfo_t		*npip = NULL;
7939 	scsi_vhci_priv_t	*svp = NULL;
7940 	struct scsi_address	*pap = NULL;
7941 	scsi_hba_tran_t		*hba = NULL;
7942 	int			sps;
7943 	int			mps_flag;
7944 	int			rval = 0;
7945 
7946 	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
7947 	if (pip) {
7948 		/*
7949 		 * If the call is from vhci_pathinfo_state_change,
7950 		 * then this path was busy and is becoming ready to accept IO.
7951 		 */
7952 		ASSERT(ap != NULL);
7953 		hba = ap->a_hba_tran;
7954 		ASSERT(hba != NULL);
7955 		rval = scsi_ifsetcap(ap, cap, val, whom);
7956 
7957 		VHCI_DEBUG(2, (CE_NOTE, NULL,
7958 		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
7959 		    (void *)pip, (void *)ap, rval));
7960 
7961 		return (rval);
7962 	}
7963 
7964 	/*
7965 	 * Set capability on all the pHCIs.
7966 	 * If any path is busy, then the capability would be set by
7967 	 * vhci_pathinfo_state_change.
7968 	 */
7969 
7970 	cdip = ADDR2DIP(ap);
7971 	ASSERT(cdip != NULL);
7972 	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
7973 	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
7974 		VHCI_DEBUG(2, (CE_WARN, NULL,
7975 		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
7976 		    (void *)cdip));
7977 		return (0);
7978 	}
7979 
7980 again:
7981 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7982 	if (svp == NULL) {
7983 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
7984 		    "priv is NULL, pip 0x%p", (void *)pip));
7985 		mdi_rele_path(pip);
7986 		return (rval);
7987 	}
7988 
7989 	if (svp->svp_psd == NULL) {
7990 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
7991 		    "psd is NULL, pip 0x%p, svp 0x%p",
7992 		    (void *)pip, (void *)svp));
7993 		mdi_rele_path(pip);
7994 		return (rval);
7995 	}
7996 
7997 	pap = &svp->svp_psd->sd_address;
7998 	ASSERT(pap != NULL);
7999 	hba = pap->a_hba_tran;
8000 	ASSERT(hba != NULL);
8001 
8002 	if (hba->tran_setcap != NULL) {
8003 		rval = scsi_ifsetcap(pap, cap, val, whom);
8004 
8005 		VHCI_DEBUG(2, (CE_NOTE, NULL,
8006 		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
8007 		    (void *)pip, (void *)ap, rval));
8008 
8009 		/*
8010 		 * Select next path and issue the setcap, repeat
8011 		 * until all paths are exhausted
8012 		 */
8013 		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
8014 		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
8015 			mdi_rele_path(pip);
8016 			return (1);
8017 		}
8018 		mdi_rele_path(pip);
8019 		pip = npip;
8020 		goto again;
8021 	}
8022 	mdi_rele_path(pip);
8023 	return (rval);
8024 }
8025 
8026 static int
8027 vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8028     void *arg, dev_info_t **child)
8029 {
8030 	char *guid;
8031 
8032 	if (vhci_bus_config_debug)
8033 		flags |= NDI_DEVI_DEBUG;
8034 
8035 	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
8036 		guid = vhci_devnm_to_guid((char *)arg);
8037 	else
8038 		guid = NULL;
8039 
8040 	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
8041 	    == MDI_SUCCESS)
8042 		return (NDI_SUCCESS);
8043 	else
8044 		return (NDI_FAILURE);
8045 }
8046 
8047 static int
8048 vhci_scsi_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8049     void *arg)
8050 {
8051 	if (vhci_bus_config_debug)
8052 		flags |= NDI_DEVI_DEBUG;
8053 
8054 	return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
8055 }
8056 
8057 /*
8058  * Take the original vhci_pkt, create a duplicate of the pkt for resending
8059  * as though it originated in ssd.
8060  */
8061 static struct scsi_pkt *
8062 vhci_create_retry_pkt(struct vhci_pkt *vpkt)
8063 {
8064 	struct vhci_pkt *new_vpkt = NULL;
8065 	struct scsi_pkt	*pkt = NULL;
8066 
8067 	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
8068 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8069 
8070 	/*
8071 	 * Ensure consistent data at completion time by setting PKT_CONSISTENT
8072 	 */
8073 	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
8074 	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
8075 	    vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
8076 	if (pkt != NULL) {
8077 		new_vpkt = TGTPKT2VHCIPKT(pkt);
8078 
8079 		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
8080 		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
8081 		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
8082 		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;
8083 
8084 		pkt->pkt_resid = 0;
8085 		pkt->pkt_statistics = 0;
8086 		pkt->pkt_reason = 0;
8087 
8088 		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
8089 		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);
8090 
8091 		/*
8092 		 * Save a pointer to the original vhci_pkt
8093 		 */
8094 		new_vpkt->vpkt_org_vpkt = vpkt;
8095 	}
8096 
8097 	return (pkt);
8098 }
8099 
8100 /*
8101  * Copy the successful completion information from the hba packet into
8102  * the original target pkt from the upper layer.  Returns the original
8103  * vpkt and destroys the new vpkt from the internal retry.
8104  */
8105 static struct vhci_pkt *
8106 vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
8107 {
8108 	struct vhci_pkt		*ret_vpkt = NULL;
8109 	struct scsi_pkt		*tpkt = NULL;
8110 	struct scsi_pkt		*hba_pkt = NULL;
8111 	scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
8112 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8113 
8114 	ASSERT(vpkt->vpkt_org_vpkt != NULL);
8115 	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
8116 	    "completed successfully!\n"));
8117 
8118 	ret_vpkt = vpkt->vpkt_org_vpkt;
8119 	tpkt = ret_vpkt->vpkt_tgt_pkt;
8120 	hba_pkt = vpkt->vpkt_hba_pkt;
8121 
8122 	/*
8123 	 * Copy the good status into the target driver's packet
8124 	 */
8125 	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
8126 	tpkt->pkt_resid = hba_pkt->pkt_resid;
8127 	tpkt->pkt_state = hba_pkt->pkt_state;
8128 	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
8129 	tpkt->pkt_reason = hba_pkt->pkt_reason;
8130 
8131 	/*
8132 	 * Destroy the internally created vpkt for the retry
8133 	 */
8134 	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
8135 	    vpkt->vpkt_tgt_pkt);
8136 
8137 	return (ret_vpkt);
8138 }
8139 
8140 /* restart the REQUEST SENSE command */
8141 static void
8142 vhci_uscsi_restart_sense(void *arg)
8143 {
8144 	struct buf 	*rqbp;
8145 	struct buf 	*bp;
8146 	struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
8147 	mp_uscsi_cmd_t 	*mp_uscmdp;
8148 
8149 	VHCI_DEBUG(4, (CE_WARN, NULL,
8150 	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));
8151 
8152 	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
8153 		/* if it fails - need to wakeup the original command */
8154 		mp_uscmdp = rqpkt->pkt_private;
8155 		bp = mp_uscmdp->cmdbp;
8156 		rqbp = mp_uscmdp->rqbp;
8157 		ASSERT(mp_uscmdp && bp && rqbp);
8158 		scsi_free_consistent_buf(rqbp);
8159 		scsi_destroy_pkt(rqpkt);
8160 		bp->b_resid = bp->b_bcount;
8161 		bioerror(bp, EIO);
8162 		biodone(bp);
8163 	}
8164 }
8165 
8166 /*
8167  * auto-rqsense is not enabled, so we have to retrieve the request sense
8168  * manually.
8169  */
8170 static int
8171 vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
8172 {
8173 	struct buf 		*rqbp, *cmdbp;
8174 	struct scsi_pkt 	*rqpkt;
8175 	int			rval = 0;
8176 
8177 	cmdbp = mp_uscmdp->cmdbp;
8178 	ASSERT(cmdbp != NULL);
8179 
8180 	VHCI_DEBUG(4, (CE_WARN, NULL,
8181 	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
8182 	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
8183 	/* set up the packet information and cdb */
8184 	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
8185 	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
8186 		return (-1);
8187 	}
8188 
8189 	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
8190 	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
8191 		scsi_free_consistent_buf(rqbp);
8192 		return (-1);
8193 	}
8194 
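	/* build a group 0 REQUEST SENSE cdb with SENSE_LENGTH allocation */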
8195 	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
8196 	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
8197 
8198 	mp_uscmdp->rqbp = rqbp;
8199 	rqbp->b_private = mp_uscmdp;
8200 	rqpkt->pkt_flags |= FLAG_SENSING;
8201 	rqpkt->pkt_time = 60;
8202 	rqpkt->pkt_comp = vhci_uscsi_iodone;
8203 	rqpkt->pkt_private = mp_uscmdp;
8204 
8205 	/*
8206 	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
8207 	 * selection is not based on path_instance.
8208 	 */
8209 	if (scsi_pkt_allocated_correctly(rqpkt))
8210 		rqpkt->pkt_path_instance = 0;
8211 
8212 	/* issue the request sense command */
8213 	switch (scsi_transport(rqpkt)) {
8214 	case TRAN_ACCEPT:
8215 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8216 		    "transport accepted."));
8217 		break;
8218 	case TRAN_BUSY:
8219 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8220 		    "transport busy, setting timeout."));
8221 		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
8222 		    (drv_usectohz(5 * 1000000)));
8223 		break;
8224 	default:
8225 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8226 		    "transport failed"));
8227 		scsi_free_consistent_buf(rqbp);
8228 		scsi_destroy_pkt(rqpkt);
8229 		rval = -1;
8230 	}
8231 
8232 	return (rval);
8233 }
8234 
8235 /*
8236  * done routine for the mpapi uscsi command - this behaves as though
8237  * FLAG_DIAGNOSE is set, meaning there are no retries except for a
8238  * manual request sense.
8239  */
8240 void
8241 vhci_uscsi_iodone(struct scsi_pkt *pkt)
8242 {
8243 	struct buf 			*bp;
8244 	mp_uscsi_cmd_t 			*mp_uscmdp;
8245 	struct uscsi_cmd 		*uscmdp;
8246 	struct scsi_arq_status 		*arqstat;
8247 	int 				err;
8248 
8249 	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
8250 	uscmdp = mp_uscmdp->uscmdp;
8251 	bp = mp_uscmdp->cmdbp;
8252 	ASSERT(bp != NULL);
8253 	VHCI_DEBUG(4, (CE_WARN, NULL,
8254 	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
8255 	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
8256 	/* Save the status and the residual into the uscsi_cmd struct */
8257 	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
8258 	uscmdp->uscsi_resid = bp->b_resid;
8259 
8260 	/* return early on a completely successful command */
8261 	if (pkt->pkt_reason == CMD_CMPLT &&
8262 	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
8263 	    pkt->pkt_resid == 0) {
8264 		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8265 		scsi_destroy_pkt(pkt);
8266 		biodone(bp);
8267 		return;
8268 	}
8269 	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
8270 	    " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
8271 	    pkt->pkt_reason, pkt->pkt_resid,
8272 	    pkt->pkt_state, bp->b_bcount, bp->b_resid));
8273 
8274 	err = EIO;
8275 
8276 	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
8277 	if (pkt->pkt_reason != CMD_CMPLT) {
8278 		/*
8279 		 * The command did not complete.
8280 		 */
8281 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8282 		    "vhci_uscsi_iodone: command did not complete."
8283 		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
8284 		if (pkt->pkt_flags & FLAG_SENSING) {
8285 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8286 		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
8287 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
8288 			err = ETIMEDOUT;
8289 		}
8290 	} else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
8291 		/*
8292 		 * The auto-rqsense happened, and the packet has a filled-in
8293 		 * scsi_arq_status structure, pointed to by pkt_scbp.
8294 		 */
8295 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8296 		    "vhci_uscsi_iodone: received auto-requested sense"));
8297 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8298 			/* get the amount of data to copy into rqbuf */
8299 			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
8300 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8301 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8302 			uscmdp->uscsi_rqstatus =
8303 			    *((char *)&arqstat->sts_rqpkt_status);
8304 			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
8305 			    rqlen != 0) {
8306 				bcopy(&(arqstat->sts_sensedata),
8307 				    uscmdp->uscsi_rqbuf, rqlen);
8308 			}
8309 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8310 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8311 			    "vhci_uscsi_iodone: ARQ "
8312 			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
8313 			    "xfer: %d rqpkt_resid: %d\n",
8314 			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
8315 			    uscmdp->uscsi_rqlen, rqlen,
8316 			    arqstat->sts_rqpkt_resid));
8317 		}
8318 	} else if (pkt->pkt_flags & FLAG_SENSING) {
8319 		struct buf *rqbp;
8320 		struct scsi_status *rqstatus;
8321 
8322 		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
8323 		/* a manual request sense was done - get the information */
8324 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8325 			int rqlen = SENSE_LENGTH - pkt->pkt_resid;
8326 
8327 			rqbp = mp_uscmdp->rqbp;
8328 			/* get the amount of data to copy into rqbuf */
8329 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8330 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8331 			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
8332 			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
8333 				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
8334 				    rqlen);
8335 			}
8336 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8337 			scsi_free_consistent_buf(rqbp);
8338 		}
8339 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING"
8340 		    "uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
8341 		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
8342 	} else {
8343 		struct scsi_status *status =
8344 		    (struct scsi_status *)pkt->pkt_scbp;
8345 		/*
8346 		 * Command completed and we're not getting sense. Check for
8347 		 * errors and decide what to do next.
8348 		 */
8349 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8350 		    "vhci_uscsi_iodone: command appears complete: reason: %x",
8351 		    pkt->pkt_reason));
8352 		if (status->sts_chk) {
8353 			/* need to manually get the request sense */
8354 			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
8355 				scsi_destroy_pkt(pkt);
8356 				return;
8357 			}
8358 		} else {
8359 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8360 			    "vhci_chk_err: appears complete"));
8361 			err = 0;
8362 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8363 			if (pkt->pkt_resid) {
8364 				bp->b_resid += pkt->pkt_resid;
8365 			}
8366 		}
8367 	}
8368 
8369 	if (err) {
8370 		if (bp->b_resid == 0)
8371 			bp->b_resid = bp->b_bcount;
8372 		bioerror(bp, err);
8373 		bp->b_flags |= B_ERROR;
8374 	}
8375 
8376 	scsi_destroy_pkt(pkt);
8377 	biodone(bp);
8378 
8379 	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
8380 }
8381 
8382 /*
8383  * start routine for the mpapi uscsi command
8384  */
8385 int
8386 vhci_uscsi_iostart(struct buf *bp)
8387 {
8388 	struct scsi_pkt 	*pkt;
8389 	struct uscsi_cmd	*uscmdp;
8390 	mp_uscsi_cmd_t 		*mp_uscmdp;
8391 	int			stat_size, rval;
8392 	int			retry = 0;
8393 
8394 	ASSERT(bp->b_private != NULL);
8395 
8396 	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
8397 	uscmdp = mp_uscmdp->uscmdp;
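	/* request room for auto sense data only when USCSI_RQENABLE is set */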
8398 	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8399 		stat_size = SENSE_LENGTH;
8400 	} else {
8401 		stat_size = 1;
8402 	}
8403 
8404 	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
8405 	    stat_size, 0, 0, SLEEP_FUNC, NULL);
8406 	if (pkt == NULL) {
8407 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8408 		    "vhci_uscsi_iostart: rval: EINVAL"));
8409 		bp->b_resid = bp->b_bcount;
8410 		uscmdp->uscsi_resid = bp->b_bcount;
8411 		bioerror(bp, EINVAL);
8412 		biodone(bp);
8413 		return (EINVAL);
8414 	}
8415 
8416 	pkt->pkt_time = uscmdp->uscsi_timeout;
8417 	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
8418 	pkt->pkt_comp = vhci_uscsi_iodone;
8419 	pkt->pkt_private = mp_uscmdp;
8420 	if (uscmdp->uscsi_flags & USCSI_SILENT)
8421 		pkt->pkt_flags |= FLAG_SILENT;
8422 	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
8423 		pkt->pkt_flags |= FLAG_ISOLATE;
8424 	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
8425 		pkt->pkt_flags |= FLAG_DIAGNOSE;
8426 	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
8427 		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
8428 	}
8429 	VHCI_DEBUG(4, (CE_WARN, NULL,
8430 	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
8431 	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
8432 	    " stat_size: %d",
8433 	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
8434 	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
8435 	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));
8436 
8437 	/*
8438 	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
8439 	 * selection is not based on path_instance.
8440 	 */
8441 	if (scsi_pkt_allocated_correctly(pkt))
8442 		pkt->pkt_path_instance = 0;
8443 
8444 	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
8445 	    retry < vhci_uscsi_retry_count) {
8446 		delay(drv_usectohz(vhci_uscsi_delay));
8447 		retry++;
8448 	}
8449 	if (retry >= vhci_uscsi_retry_count) {
8450 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8451 		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
8452 	}
8453 	switch (rval) {
8454 	case TRAN_ACCEPT:
8455 		rval =  0;
8456 		break;
8457 
8458 	default:
8459 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8460 		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
8461 		    rval, bp->b_bcount, bp->b_resid));
8462 		bp->b_resid = bp->b_bcount;
8463 		uscmdp->uscsi_resid = bp->b_bcount;
8464 		bioerror(bp, EIO);
8465 		scsi_destroy_pkt(pkt);
8466 		biodone(bp);
8467 		rval = EIO;
8468 		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8469 		break;
8470 	}
8471 	VHCI_DEBUG(4, (CE_NOTE, NULL,
8472 	    "vhci_uscsi_iostart: exit: rval: %d", rval));
8473 	return (rval);
8474 }
8475 
8476 /* ARGSUSED */
8477 static struct scsi_failover_ops *
8478 vhci_dev_fo(dev_info_t *vdip, struct scsi_device *psd,
8479     void **ctprivp, char **fo_namep)
8480 {
8481 	struct scsi_failover_ops	*sfo;
8482 	char				*sfo_name;
8483 	char				*override;
8484 	struct scsi_failover		*sf;
8485 
8486 	ASSERT(psd && psd->sd_inq);
8487 	if ((psd == NULL) || (psd->sd_inq == NULL)) {
8488 		VHCI_DEBUG(1, (CE_NOTE, NULL,
8489 		    "!vhci_dev_fo:return NULL no scsi_device or inquiry"));
8490 		return (NULL);
8491 	}
8492 
8493 	/*
8494 	 * Determine if device is supported under scsi_vhci, and select
8495 	 * failover module.
8496 	 *
8497 	 * See if there is a scsi_vhci.conf file override for this device's
8498 	 * VID/PID. The following values can be returned:
8499 	 *
8500 	 * NULL		If NULL is returned then there is no scsi_vhci.conf
8501 	 *		override.  For NULL, we determine the failover_ops for
8502 	 *		this device by checking the sfo_device_probe entry
8503 	 *		point for each 'fops' module, in order.
8504 	 *
8505 	 *		NOTE: Correct operation may depend on module ordering
8506 	 *		of 'specific' (failover modules that are completely
8507 	 *		VID/PID table based) to 'generic' (failover modules
8508 	 *		that are based on T10 standards like TPGS).  Currently,
8509 	 *		the value of 'ddi-forceload' in scsi_vhci.conf is used
8510 	 *		to establish the module list and probe order.
8511 	 *
8512 	 * "NONE"	If value "NONE" is returned then there is a
8513 	 *		scsi_vhci.conf VID/PID override to indicate the device
8514 	 *		should not be supported under scsi_vhci (even if there
8515 	 *		is an 'fops' module supporting the device).
8516 	 *
8517 	 * "<other>"	If another value is returned then that value is the
8518 	 *		name of the 'fops' module that should be used.
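	 *
	 * An illustrative override entry (the VID/PID strings below are
	 * hypothetical) would look like this in scsi_vhci.conf:
	 *
	 *	scsi-vhci-failover-override =
	 *		"VendorA ProductX",	"f_sym",
	 *		"VendorB ProductY",	"NONE";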
8519 	 */
8520 	sfo = NULL;	/* "NONE" */
8521 	override = scsi_get_device_type_string(
8522 	    "scsi-vhci-failover-override", vdip, psd);
8523 	if (override == NULL) {
8524 		/* NULL: default: select based on sfo_device_probe results */
8525 		for (sf = scsi_failover_table; sf->sf_mod; sf++) {
8526 			if ((sf->sf_sfo == NULL) ||
8527 			    sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
8528 			    ctprivp) == SFO_DEVICE_PROBE_PHCI)
8529 				continue;
8530 
8531 			/* found failover module, supported under scsi_vhci */
8532 			sfo = sf->sf_sfo;
8533 			if (fo_namep && (*fo_namep == NULL)) {
8534 				sfo_name = i_ddi_strdup(sfo->sfo_name,
8535 				    KM_SLEEP);
8536 				*fo_namep = sfo_name;
8537 			}
8538 			break;
8539 		}
8540 	} else if (strcasecmp(override, "NONE")) {
8541 		/* !"NONE": select based on driver.conf specified name */
8542 		for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
8543 			if ((sf->sf_sfo == NULL) ||
8544 			    (sf->sf_sfo->sfo_name == NULL) ||
8545 			    strcmp(override, sf->sf_sfo->sfo_name))
8546 				continue;
8547 
8548 			/*
8549 			 * NOTE: If sfo_device_probe() has side-effects,
8550 			 * including setting *ctprivp, these are not going
8551 			 * to occur with override config.
8552 			 */
8553 
8554 			/* found failover module, supported under scsi_vhci */
8555 			sfo = sf->sf_sfo;
8556 			if (fo_namep && (*fo_namep == NULL)) {
8557 				sfo_name = kmem_alloc(strlen("conf ") +
8558 				    strlen(sfo->sfo_name) + 1, KM_SLEEP);
8559 				(void) sprintf(sfo_name, "conf %s",
8560 				    sfo->sfo_name);
8561 				*fo_namep = sfo_name;
8562 			}
8563 			break;
8564 		}
8565 	}
8566 	if (override)
8567 		kmem_free(override, strlen(override) + 1);
8568 	return (sfo);
8569 }
8570 
8571 /*
8572  * Determine whether the device described by cinfo should be enumerated
8573  * under the vHCI or the pHCI - if there is a failover ops then the device
8574  * is supported under the vHCI.  By agreement with SCSA, cinfo is a pointer
8575  * to a scsi_device structure associated with a decorated pHCI probe node.
8576  */
8577 /* ARGSUSED */
8578 int
8579 vhci_is_dev_supported(dev_info_t *vdip, dev_info_t *pdip, void *cinfo)
8580 {
8581 	struct scsi_device	*psd = (struct scsi_device *)cinfo;
8582 
8583 	return (vhci_dev_fo(vdip, psd, NULL, NULL) ? MDI_SUCCESS : MDI_FAILURE);
8584 }
8585 
8586 
8587 #ifdef DEBUG
8588 extern struct scsi_key_strings scsi_cmds[];
8589 
8590 static char *
8591 vhci_print_scsi_cmd(char cmd)
8592 {
8593 	char tmp[64];
8594 	char *cpnt;
8595 
8596 	cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
8597 	/* tmp goes out of scope on return and caller sees garbage */
8598 	if (cpnt == tmp) {
8599 		cpnt = "Unknown Command";
8600 	}
8601 	return (cpnt);
8602 }
8603 
8604 extern uchar_t	scsi_cdb_size[];
8605 
8606 static void
8607 vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
8608 {
8609 	int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
8610 	char buf[256];
8611 
8612 	if (level == CE_NOTE) {
8613 		vhci_log(level, dip, "path cmd %s\n",
8614 		    vhci_print_scsi_cmd(*cdb));
8615 		return;
8616 	}
8617 
8618 	(void) sprintf(buf, "%s for cmd(%s)", title, vhci_print_scsi_cmd(*cdb));
8619 	vhci_clean_print(dip, level, buf, cdb, len);
8620 }
8621 
8622 static void
8623 vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
8624     int len)
8625 {
8626 	int	i;
8627 	int 	c;
8628 	char	*format;
8629 	char	buf[256];
8630 	uchar_t	byte;
8631 
8632 	(void) sprintf(buf, "%s:\n", title);
8633 	vhci_log(level, dev, "%s", buf);
8634 	level = CE_CONT;
8635 	for (i = 0; i < len; ) {
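		/* format up to eight bytes per output line */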
8636 		buf[0] = 0;
8637 		for (c = 0; c < 8 && i < len; c++, i++) {
8638 			byte = (uchar_t)data[i];
8639 			if (byte < 0x10)
8640 				format = "0x0%x ";
8641 			else
8642 				format = "0x%x ";
8643 			(void) sprintf(&buf[(int)strlen(buf)], format, byte);
8644 		}
8645 		(void) sprintf(&buf[(int)strlen(buf)], "\n");
8646 
8647 		vhci_log(level, dev, "%s\n", buf);
8648 	}
8649 }
8650 #endif
8651 static void
8652 vhci_invalidate_mpapi_lu(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun)
8653 {
8654 	char			*svl_wwn;
8655 	mpapi_item_list_t	*ilist;
8656 	mpapi_lu_data_t		*ld;
8657 
8658 	if (vlun == NULL) {
8659 		return;
8660 	} else {
8661 		svl_wwn = vlun->svl_lun_wwn;
8662 	}
8663 
8664 	ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head;
8665 
8666 	while (ilist != NULL) {
8667 		ld = (mpapi_lu_data_t *)(ilist->item->idata);
8668 		if ((ld != NULL) && (strncmp(ld->prop.name, svl_wwn,
8669 		    strlen(svl_wwn)) == 0)) {
8670 			ld->valid = 0;
8671 			VHCI_DEBUG(6, (CE_WARN, NULL,
8672 			    "vhci_invalidate_mpapi_lu: "
8673 			    "Invalidated LU(%s)", svl_wwn));
8674 			return;
8675 		}
8676 		ilist = ilist->next;
8677 	}
8678 	VHCI_DEBUG(6, (CE_WARN, NULL, "vhci_invalidate_mpapi_lu: "
8679 	    "Could not find LU(%s) to invalidate.", svl_wwn));
8680 }
8681