xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c (revision 581cede61ac9c14d8d4ea452562a567189eead78)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2000 to 2009, LSI Corporation.
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms of all code within
32  * this file that is exclusively owned by LSI, with or without
33  * modification, is permitted provided that, in addition to the CDDL 1.0
34  * License requirements, the following conditions are met:
35  *
36  *    Neither the name of the author nor the names of its contributors may be
37  *    used to endorse or promote products derived from this software without
38  *    specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
43  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
44  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
45  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
46  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
47  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
48  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
49  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
50  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
51  * DAMAGE.
52  */
53 
54 /*
55  * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
56  *
57  */
58 
59 #if defined(lint) || defined(DEBUG)
60 #define	MPTSAS_DEBUG
61 #endif
62 
63 /*
64  * standard header files.
65  */
66 #include <sys/note.h>
67 #include <sys/scsi/scsi.h>
68 #include <sys/pci.h>
69 #include <sys/file.h>
70 #include <sys/policy.h>
71 #include <sys/sysevent.h>
72 #include <sys/sysevent/eventdefs.h>
73 #include <sys/sysevent/dr.h>
74 #include <sys/sata/sata_defs.h>
75 
76 #pragma pack(1)
77 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
78 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
79 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
83 #pragma pack()
84 
85 /*
86  * private header files.
87  *
88  */
89 #include <sys/scsi/impl/scsi_reset_notify.h>
90 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
91 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
92 #include <sys/raidioctl.h>
93 
94 #include <sys/fs/dv_node.h>	/* devfs_clean */
95 
96 /*
97  * FMA header files
98  */
99 #include <sys/ddifm.h>
100 #include <sys/fm/protocol.h>
101 #include <sys/fm/util.h>
102 #include <sys/fm/io/ddi.h>
103 
104 /*
105  * autoconfiguration data and routines.
106  */
107 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
108 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
109 static int mptsas_power(dev_info_t *dip, int component, int level);
110 
111 /*
112  * cb_ops function
113  */
114 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
115 	cred_t *credp, int *rval);
116 #ifndef	__sparc
117 static int mptsas_quiesce(dev_info_t *devi);
118 #endif	/* __sparc */
119 
120 /*
121  * Resource initilaization for hardware
122  */
123 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
124 static void mptsas_disable_bus_master(mptsas_t *mpt);
125 static void mptsas_hba_fini(mptsas_t *mpt);
126 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
127 static int mptsas_alloc_request_frames(mptsas_t *mpt);
128 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
129 static int mptsas_alloc_free_queue(mptsas_t *mpt);
130 static int mptsas_alloc_post_queue(mptsas_t *mpt);
131 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
132 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
133 
134 /*
135  * SCSA function prototypes
136  */
137 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
138 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
139 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
140 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
141 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
142     int tgtonly);
143 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
144 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
145     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
146 	int tgtlen, int flags, int (*callback)(), caddr_t arg);
147 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
148 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
149     struct scsi_pkt *pkt);
150 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
151     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
152 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
153     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
154 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
155     void (*callback)(caddr_t), caddr_t arg);
156 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
157 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
158 static int mptsas_scsi_quiesce(dev_info_t *dip);
159 static int mptsas_scsi_unquiesce(dev_info_t *dip);
160 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
161     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
162 
163 /*
164  * SMP functions
165  */
166 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
167 static int mptsas_getcap(struct sas_addr *ap, char *cap);
168 static int mptsas_capchk(char *cap, int tgtonly, int *cidxp);
169 
170 /*
171  * internal function prototypes.
172  */
173 static int mptsas_quiesce_bus(mptsas_t *mpt);
174 static int mptsas_unquiesce_bus(mptsas_t *mpt);
175 
176 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
177 static void mptsas_free_handshake_msg(mptsas_t *mpt);
178 
179 static void mptsas_ncmds_checkdrain(void *arg);
180 
181 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
182 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
183 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
184 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
185 
186 static int mptsas_do_detach(dev_info_t *dev);
187 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
188 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
189     struct scsi_pkt *pkt);
190 
191 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
192 static void mptsas_handle_event(void *args);
193 static int mptsas_handle_event_sync(void *args);
194 static void mptsas_handle_dr(void *args);
195 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
196     dev_info_t *pdip);
197 
198 static void mptsas_restart_cmd(void *);
199 
200 static void mptsas_flush_hba(mptsas_t *mpt);
201 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
202 	uint8_t tasktype);
203 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
204     uchar_t reason, uint_t stat);
205 
206 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
207 static void mptsas_process_intr(mptsas_t *mpt,
208     pMpi2ReplyDescriptorsUnion_t reply_desc_union);
209 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
210     pMpi2ReplyDescriptorsUnion_t reply_desc);
211 static void mptsas_handle_address_reply(mptsas_t *mpt,
212     pMpi2ReplyDescriptorsUnion_t reply_desc);
213 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
214 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
215     uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
216 
217 static void mptsas_watch(void *arg);
218 static void mptsas_watchsubr(mptsas_t *mpt);
219 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
220 
221 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
222 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
223     uint8_t *data, uint32_t request_size, uint32_t reply_size,
224     uint32_t data_size, uint32_t direction, uint8_t *dataout,
225     uint32_t dataout_size, short timeout, int mode);
226 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
227 
228 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
229     int cmdlen, int tgtlen, int statuslen, int kf);
230 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
231 
232 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
233 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
234 
235 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
236     int kmflags);
237 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
238 
239 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
240     mptsas_cmd_t *cmd);
241 static void mptsas_check_task_mgt(mptsas_t *mpt,
242     pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
243 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
244     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
245     int *resid);
246 
247 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
248 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
249 
250 static void mptsas_restart_hba(mptsas_t *mpt);
251 static void mptsas_restart_waitq(mptsas_t *mpt);
252 
253 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
254 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
255 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
256 
257 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
258 static void mptsas_doneq_empty(mptsas_t *mpt);
259 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
260 
261 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
262 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
263 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
264 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
265 
266 
267 static void mptsas_start_watch_reset_delay();
268 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
269 static void mptsas_watch_reset_delay(void *arg);
270 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
271 
272 /*
273  * helper functions
274  */
275 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
276 
277 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
278 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
279 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
280     int lun);
281 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
282     int lun);
283 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
284 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
285 
286 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
287     int *lun);
288 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
289 
290 static mptsas_target_t *mptsas_phy_to_tgt(dev_info_t *pdip, uint8_t phy);
291 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int port,
292     uint64_t wwid);
293 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int port,
294     uint64_t wwid);
295 
296 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
297     uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
298 
299 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
300     uint16_t *handle, mptsas_target_t **pptgt);
301 static void mptsas_update_phymask(mptsas_t *mpt);
302 
303 /*
304  * Enumeration / DR functions
305  */
306 static void mptsas_config_all(dev_info_t *pdip);
307 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
308     dev_info_t **lundip);
309 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
310     dev_info_t **lundip);
311 
312 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
313 static int mptsas_offline_target(dev_info_t *pdip, char *name);
314 
315 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
316     dev_info_t **dip);
317 
318 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
319 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
320     dev_info_t **dip, mptsas_target_t *ptgt);
321 
322 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
323     dev_info_t **dip, mptsas_target_t *ptgt, int lun);
324 
325 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
326     char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
327 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
328     char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
329     int lun);
330 
331 static void mptsas_offline_missed_luns(dev_info_t *pdip,
332     uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
333 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
334     mdi_pathinfo_t *rpip, uint_t flags);
335 
336 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
337     dev_info_t **smp_dip);
338 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
339     uint_t flags);
340 
341 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
342     int mode, int *rval);
343 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
344     int mode, int *rval);
345 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
346     int mode, int *rval);
347 static void mptsas_record_event(void *args);
348 
349 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
350 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
351 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
352 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
353     uint8_t key2);
354 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
355     uint8_t key2);
356 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
357 
358 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
359     uint32_t, uint8_t, uint8_t);
360 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
361     mptsas_smp_t *data);
362 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
363     uint8_t physport);
364 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, uint8_t);
365 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
366 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
367     dev_info_t **smp_dip);
368 
369 /*
370  * Power management functions
371  */
372 static void mptsas_idle_pm(void *arg);
373 static int mptsas_init_pm(mptsas_t *mpt);
374 
375 /*
376  * MPT MSI tunable:
377  *
378  * By default MSI is enabled on all supported platforms.
379  */
380 boolean_t mptsas_enable_msi = B_TRUE;
381 
382 static int mptsas_add_intrs(mptsas_t *, int);
383 static void mptsas_rem_intrs(mptsas_t *);
384 
385 /*
386  * FMA Prototypes
387  */
388 static void mptsas_fm_init(mptsas_t *mpt);
389 static void mptsas_fm_fini(mptsas_t *mpt);
390 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
391 
392 extern pri_t minclsyspri, maxclsyspri;
393 
394 /*
395  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).  It is
396  * under this device that the paths to a physical device are created when
397  * MPxIO is used.
398  */
399 extern dev_info_t	*scsi_vhci_dip;
400 
401 /*
402  * Tunable timeout value for Inquiry VPD page 0x83
403  * By default the value is 30 seconds.
404  */
405 int mptsas_inq83_retry_timeout = 30;
406 
/*
 * DMA attributes used to allocate message frame storage (request/reply
 * frames and queues), not for data I/O DMA.  The IOC requires all message
 * frames to reside in the first 4GB of physical memory, which is why
 * the address-high limit is capped at the 32-bit maximum.
 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version		*/
	0x0ull,		/* address low - should be 0 (longlong)	*/
	0xffffffffull,	/* address high - 32-bit max range	*/
	0x00ffffffull,	/* count max - max DMA object size	*/
	4,		/* allocation alignment requirements	*/
	0x78,		/* burstsizes - binary encoded values	*/
	1,		/* minxfer - gran. of DMA engine	*/
	0x00ffffffull,	/* maxxfer - gran. of DMA engine	*/
	0xffffffffull,	/* max segment size (DMA boundary)	*/
	MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length	*/
	512,		/* granularity - device transfer size	*/
	0		/* flags, set to 0			*/
};
426 
/*
 * DMA attributes for data I/O DMA memory allocation.  Unlike the message
 * frame attributes above, the full 64-bit physical address space is
 * supported, and relaxed ordering is enabled for throughput.
 */
ddi_dma_attr_t mptsas_dma_attrs64 = {
	DMA_ATTR_V0,	/* attribute layout version		*/
	0x0ull,		/* address low - should be 0 (longlong)	*/
	0xffffffffffffffffull,	/* address high - 64-bit max	*/
	0x00ffffffull,	/* count max - max DMA object size	*/
	4,		/* allocation alignment requirements	*/
	0x78,		/* burstsizes - binary encoded values	*/
	1,		/* minxfer - gran. of DMA engine	*/
	0x00ffffffull,	/* maxxfer - gran. of DMA engine	*/
	0xffffffffull,	/* max segment size (DMA boundary)	*/
	MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length	*/
	512,		/* granularity - device transfer size	*/
	DDI_DMA_RELAXED_ORDERING	/* flags, enable relaxed ordering */
};
445 
/*
 * Device register access attributes: the IOC's registers are
 * little-endian and accesses must be strictly ordered.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V0,	/* attribute structure version */
	DDI_STRUCTURE_LE_ACC,	/* device data is little-endian */
	DDI_STRICTORDER_ACC	/* no reordering of register accesses */
};
451 
/*
 * Character device entry points.  open/close are delegated to the SCSA
 * framework (scsi_hba_open/scsi_hba_close); only ioctl is implemented by
 * this driver.  D_MP marks the driver as MT-safe.
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,		/* open */
	scsi_hba_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mptsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* streamtab */
	D_MP,			/* cb_flag */
	CB_REV,			/* rev */
	nodev,			/* aread */
	nodev			/* awrite */
};
472 
/*
 * Autoconfiguration entry points.  probe/identify are nulldev since the
 * framework locates the device via PCI; quiesce is only required on
 * non-sparc platforms (fast reboot support).
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mptsas_attach,		/* attach */
	mptsas_detach,		/* detach */
	nodev,			/* reset */
	&mptsas_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	mptsas_power,		/* power management */
#ifdef	__sparc
	ddi_quiesce_not_needed	/* quiesce - not needed on sparc */
#else
	mptsas_quiesce		/* quiesce */
#endif	/* __sparc */
};
491 
492 
493 #define	MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.17"
494 #define	CDATE "MPTSAS was compiled on "__DATE__
495 /* LINTED E_STATIC_UNUSED */
496 static char *MPTWASCOMPILEDON = CDATE;
497 
/*
 * Loadable module linkage: a single driver module registered with the
 * kernel module framework via mod_install()/mod_remove() in _init/_fini.
 */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING, /* Name of the module. */
	&mptsas_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
507 #define	TARGET_PROP	"target"
508 #define	LUN_PROP	"lun"
509 #define	SAS_PROP	"sas-mpt"
510 #define	MDI_GUID	"wwn"
511 #define	NDI_GUID	"guid"
512 #define	MPTSAS_DEV_GONE	"mptsas_dev_gone"
513 
/*
 * Local static data
 */
#if defined(MPTSAS_DEBUG)
uint32_t mptsas_debug_flags = 0;	/* selects NDBG* debug output */
#endif	/* defined(MPTSAS_DEBUG) */
uint32_t mptsas_debug_resets = 0;	/* nonzero to trace reset activity */

static kmutex_t		mptsas_global_mutex;	/* guards timeout globals below */
static void		*mptsas_state;		/* soft	state ptr */
/* NOTE(review): presumably guards the mptsas_head/tail list - confirm */
static krwlock_t	mptsas_global_rwlock;

static kmutex_t		mptsas_log_mutex;
static char		mptsas_log_buf[256];	/* shared log formatting buffer */
_NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))

static mptsas_t *mptsas_head, *mptsas_tail;	/* list of attached instances */
static clock_t mptsas_scsi_watchdog_tick;
static clock_t mptsas_tick;			/* mptsas_watch() rescheduling interval */
static timeout_id_t mptsas_reset_watch;
static timeout_id_t mptsas_timeout_id;		/* id of the mptsas_watch() timeout */
static int mptsas_timeouts_enabled = 0;		/* set once mptsas_watch() is armed */
536 
537 /*
538  * warlock directives
539  */
540 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
541 	mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
542 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
543 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
544 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
545 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
546 
547 #ifdef MPTSAS_DEBUG
548 void debug_enter(char *);
549 #endif
550 
551 /*
552  * Notes:
553  *	- scsi_hba_init(9F) initializes SCSI HBA modules
554  *	- must call scsi_hba_fini(9F) if modload() fails
555  */
556 int
557 _init(void)
558 {
559 	int status;
560 	/* CONSTCOND */
561 	ASSERT(NO_COMPETING_THREADS);
562 
563 	NDBG0(("_init"));
564 
565 	status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
566 	    MPTSAS_INITIAL_SOFT_SPACE);
567 	if (status != 0) {
568 		return (status);
569 	}
570 
571 	if ((status = scsi_hba_init(&modlinkage)) != 0) {
572 		ddi_soft_state_fini(&mptsas_state);
573 		return (status);
574 	}
575 
576 	mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
577 	rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
578 	mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
579 
580 	if ((status = mod_install(&modlinkage)) != 0) {
581 		mutex_destroy(&mptsas_log_mutex);
582 		rw_destroy(&mptsas_global_rwlock);
583 		mutex_destroy(&mptsas_global_mutex);
584 		ddi_soft_state_fini(&mptsas_state);
585 		scsi_hba_fini(&modlinkage);
586 	}
587 
588 	return (status);
589 }
590 
591 /*
592  * Notes:
593  *	- scsi_hba_fini(9F) uninitializes SCSI HBA modules
594  */
595 int
596 _fini(void)
597 {
598 	int	status;
599 	/* CONSTCOND */
600 	ASSERT(NO_COMPETING_THREADS);
601 
602 	NDBG0(("_fini"));
603 
604 	if ((status = mod_remove(&modlinkage)) == 0) {
605 		ddi_soft_state_fini(&mptsas_state);
606 		scsi_hba_fini(&modlinkage);
607 		mutex_destroy(&mptsas_global_mutex);
608 		rw_destroy(&mptsas_global_rwlock);
609 		mutex_destroy(&mptsas_log_mutex);
610 	}
611 	return (status);
612 }
613 
614 /*
615  * The loadable-module _info(9E) entry point
616  */
617 int
618 _info(struct modinfo *modinfop)
619 {
620 	/* CONSTCOND */
621 	ASSERT(NO_COMPETING_THREADS);
622 	NDBG0(("mptsas _info"));
623 
624 	return (mod_info(&modlinkage, modinfop));
625 }
626 
627 
628 static int
629 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
630 {
631 	dev_info_t		*pdip;
632 	mptsas_t		*mpt;
633 	scsi_hba_tran_t		*hba_tran;
634 	char			*iport = NULL;
635 	char			phymask[8];
636 	uint8_t			phy_mask = 0;
637 	int			physport = -1;
638 	int			dynamic_port = 0;
639 	uint32_t		page_address;
640 	char			initiator_wwnstr[MPTSAS_WWN_STRLEN];
641 	int			rval = DDI_FAILURE;
642 	int			i = 0;
643 	uint64_t		wwid = 0;
644 	uint8_t			portwidth = 0;
645 
646 	/* CONSTCOND */
647 	ASSERT(NO_COMPETING_THREADS);
648 
649 	switch (cmd) {
650 	case DDI_ATTACH:
651 		break;
652 
653 	case DDI_RESUME:
654 		/*
655 		 * If this a scsi-iport node, nothing to do here.
656 		 */
657 		return (DDI_SUCCESS);
658 
659 	default:
660 		return (DDI_FAILURE);
661 	}
662 
663 	pdip = ddi_get_parent(dip);
664 
665 	if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
666 	    NULL) {
667 		cmn_err(CE_WARN, "Failed attach iport because fail to "
668 		    "get tran vector for the HBA node");
669 		return (DDI_FAILURE);
670 	}
671 
672 	mpt = TRAN2MPT(hba_tran);
673 	ASSERT(mpt != NULL);
674 	if (mpt == NULL)
675 		return (DDI_FAILURE);
676 
677 	if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
678 	    NULL) {
679 		mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
680 		    "get tran vector for the iport node");
681 		return (DDI_FAILURE);
682 	}
683 
684 	/*
685 	 * Overwrite parent's tran_hba_private to iport's tran vector
686 	 */
687 	hba_tran->tran_hba_private = mpt;
688 
689 	ddi_report_dev(dip);
690 
691 	/*
692 	 * Get SAS address for initiator port according dev_handle
693 	 */
694 	iport = ddi_get_name_addr(dip);
695 	if (iport && strncmp(iport, "v0", 2) == 0) {
696 		return (DDI_SUCCESS);
697 	}
698 
699 	mutex_enter(&mpt->m_mutex);
700 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
701 		bzero(phymask, sizeof (phymask));
702 		(void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
703 		if (strcmp(phymask, iport) == 0) {
704 			break;
705 		}
706 	}
707 
708 	if (i == MPTSAS_MAX_PHYS) {
709 		mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
710 		    "seems not exist", iport);
711 		mutex_exit(&mpt->m_mutex);
712 		return (DDI_FAILURE);
713 	}
714 
715 	phy_mask = mpt->m_phy_info[i].phy_mask;
716 	physport = mpt->m_phy_info[i].port_num;
717 
718 	if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
719 		dynamic_port = 1;
720 	else
721 		dynamic_port = 0;
722 
723 	page_address = (MPI2_SASPORT_PGAD_FORM_PORT_NUM |
724 	    (MPI2_SASPORT_PGAD_PORTNUMBER_MASK & physport));
725 
726 	rval = mptsas_get_sas_port_page0(mpt, page_address, &wwid, &portwidth);
727 	if (rval != DDI_SUCCESS) {
728 		mptsas_log(mpt, CE_WARN, "Failed attach port %s because get"
729 		    "SAS address of initiator failed!", iport);
730 		mutex_exit(&mpt->m_mutex);
731 		return (DDI_FAILURE);
732 	}
733 	mutex_exit(&mpt->m_mutex);
734 
735 	bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
736 	(void) sprintf(initiator_wwnstr, "%016"PRIx64,
737 	    wwid);
738 
739 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
740 	    "initiator-port", initiator_wwnstr) !=
741 	    DDI_PROP_SUCCESS) {
742 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "initiator-port");
743 		return (DDI_FAILURE);
744 	}
745 
746 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
747 	    "phymask", phy_mask) !=
748 	    DDI_PROP_SUCCESS) {
749 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
750 		return (DDI_FAILURE);
751 	}
752 
753 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
754 	    "dynamic-port", dynamic_port) !=
755 	    DDI_PROP_SUCCESS) {
756 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
757 		return (DDI_FAILURE);
758 	}
759 	/*
760 	 * register sas hba iport with mdi (MPxIO/vhci)
761 	 */
762 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
763 	    dip, 0) == MDI_SUCCESS) {
764 		mpt->m_mpxio_enable = TRUE;
765 	}
766 	return (DDI_SUCCESS);
767 }
768 
769 /*
770  * Notes:
771  *	Set up all device state and allocate data structures,
772  *	mutexes, condition variables, etc. for device operation.
773  *	Add interrupts needed.
774  *	Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
775  */
776 static int
777 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
778 {
779 	mptsas_t		*mpt = NULL;
780 	int			instance, i, j;
781 	int			doneq_thread_num;
782 	char			buf[64];
783 	char			intr_added = 0;
784 	char			map_setup = 0;
785 	char			config_setup = 0;
786 	char			hba_attach_setup = 0;
787 	char			sas_attach_setup = 0;
788 	char			mutex_init_done = 0;
789 	char			event_taskq_create = 0;
790 	char			dr_taskq_create = 0;
791 	char			doneq_thread_create = 0;
792 	scsi_hba_tran_t		*hba_tran;
793 	int			intr_types;
794 	uint_t			mem_bar = MEM_SPACE;
795 	uint8_t			mask = 0x0;
796 	int			tran_flags = 0;
797 	int			rval = DDI_FAILURE;
798 
799 	/* CONSTCOND */
800 	ASSERT(NO_COMPETING_THREADS);
801 
802 	if (scsi_hba_iport_unit_address(dip)) {
803 		return (mptsas_iport_attach(dip, cmd));
804 	}
805 
806 	switch (cmd) {
807 	case DDI_ATTACH:
808 		break;
809 
810 	case DDI_RESUME:
811 		if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
812 			return (DDI_FAILURE);
813 
814 		mpt = TRAN2MPT(hba_tran);
815 
816 		if (!mpt) {
817 			return (DDI_FAILURE);
818 		}
819 
820 		/*
821 		 * Reset hardware and softc to "no outstanding commands"
822 		 * Note	that a check condition can result on first command
823 		 * to a	target.
824 		 */
825 		mutex_enter(&mpt->m_mutex);
826 
827 		/*
828 		 * raise power.
829 		 */
830 		if (mpt->m_options & MPTSAS_OPT_PM) {
831 			mutex_exit(&mpt->m_mutex);
832 			(void) pm_busy_component(dip, 0);
833 			if (mpt->m_power_level != PM_LEVEL_D0) {
834 				rval = pm_raise_power(dip, 0, PM_LEVEL_D0);
835 			} else {
836 				rval = pm_power_has_changed(dip, 0,
837 				    PM_LEVEL_D0);
838 			}
839 			if (rval == DDI_SUCCESS) {
840 				mutex_enter(&mpt->m_mutex);
841 			} else {
842 				/*
843 				 * The pm_raise_power() call above failed,
844 				 * and that can only occur if we were unable
845 				 * to reset the hardware.  This is probably
846 				 * due to unhealty hardware, and because
847 				 * important filesystems(such as the root
848 				 * filesystem) could be on the attached disks,
849 				 * it would not be a good idea to continue,
850 				 * as we won't be entirely certain we are
851 				 * writing correct data.  So we panic() here
852 				 * to not only prevent possible data corruption,
853 				 * but to give developers or end users a hope
854 				 * of identifying and correcting any problems.
855 				 */
856 				fm_panic("mptsas could not reset hardware "
857 				    "during resume");
858 			}
859 		}
860 
861 		mpt->m_suspended = 0;
862 
863 		/*
864 		 * Reinitialize ioc
865 		 */
866 		if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
867 			mutex_exit(&mpt->m_mutex);
868 			if (mpt->m_options & MPTSAS_OPT_PM) {
869 				(void) pm_idle_component(dip, 0);
870 			}
871 			fm_panic("mptsas init chip fail during resume");
872 		}
873 		/*
874 		 * mptsas_update_driver_data needs interrupts so enable them
875 		 * first.
876 		 */
877 		MPTSAS_ENABLE_INTR(mpt);
878 		mptsas_update_driver_data(mpt);
879 
880 		/* start requests, if possible */
881 		mptsas_restart_hba(mpt);
882 
883 		mutex_exit(&mpt->m_mutex);
884 
885 		/*
886 		 * Restart watch thread
887 		 */
888 		mutex_enter(&mptsas_global_mutex);
889 		if (mptsas_timeout_id == 0) {
890 			mptsas_timeout_id = timeout(mptsas_watch, NULL,
891 			    mptsas_tick);
892 			mptsas_timeouts_enabled = 1;
893 		}
894 		mutex_exit(&mptsas_global_mutex);
895 
896 		/* report idle status to pm framework */
897 		if (mpt->m_options & MPTSAS_OPT_PM) {
898 			(void) pm_idle_component(dip, 0);
899 		}
900 
901 		return (DDI_SUCCESS);
902 
903 	default:
904 		return (DDI_FAILURE);
905 
906 	}
907 
908 	instance = ddi_get_instance(dip);
909 
910 	/*
911 	 * Allocate softc information.
912 	 */
913 	if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
914 		mptsas_log(NULL, CE_WARN,
915 		    "mptsas%d: cannot allocate soft state", instance);
916 		goto fail;
917 	}
918 
919 	mpt = ddi_get_soft_state(mptsas_state, instance);
920 
921 	if (mpt == NULL) {
922 		mptsas_log(NULL, CE_WARN,
923 		    "mptsas%d: cannot get soft state", instance);
924 		goto fail;
925 	}
926 
927 	/* Allocate a transport structure */
928 	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
929 	ASSERT(mpt->m_tran != NULL);
930 
931 	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
932 	scsi_size_clean(dip);
933 
934 	mpt->m_dip = dip;
935 	mpt->m_instance = instance;
936 
937 	/* Make a per-instance copy of the structures */
938 	mpt->m_io_dma_attr = mptsas_dma_attrs64;
939 	mpt->m_msg_dma_attr = mptsas_dma_attrs;
940 	mpt->m_dev_acc_attr = mptsas_dev_attr;
941 
942 	/*
943 	 * Initialize FMA
944 	 */
945 	mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
946 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
947 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
948 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
949 
950 	mptsas_fm_init(mpt);
951 
952 	if (pci_config_setup(mpt->m_dip,
953 	    &mpt->m_config_handle) != DDI_SUCCESS) {
954 		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
955 		goto fail;
956 	}
957 	config_setup++;
958 
959 	if (mptsas_alloc_handshake_msg(mpt,
960 	    sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
961 		mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
962 		goto fail;
963 	}
964 
965 	/*
966 	 * This is a workaround for a XMITS ASIC bug which does not
967 	 * drive the CBE upper bits.
968 	 */
969 	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
970 	    PCI_STAT_PERROR) {
971 		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
972 		    PCI_STAT_PERROR);
973 	}
974 
975 	/*
976 	 * Setup configuration space
977 	 */
978 	if (mptsas_config_space_init(mpt) == FALSE) {
979 		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
980 		goto fail;
981 	}
982 
983 	if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
984 	    0, 0, &mpt->m_dev_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
985 		mptsas_log(mpt, CE_WARN, "map setup failed");
986 		goto fail;
987 	}
988 	map_setup++;
989 
990 	/*
991 	 * A taskq is created for dealing with the event handler
992 	 */
993 	if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
994 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
995 		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
996 		goto fail;
997 	}
998 	event_taskq_create++;
999 
1000 	/*
1001 	 * A taskq is created for dealing with dr events
1002 	 */
1003 	if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1004 	    "mptsas_dr_taskq",
1005 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1006 		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1007 		    "failed");
1008 		goto fail;
1009 	}
1010 	dr_taskq_create++;
1011 
1012 	mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1013 	    0, "mptsas_doneq_thread_threshold_prop", 10);
1014 	mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1015 	    0, "mptsas_doneq_length_threshold_prop", 8);
1016 	mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1017 	    0, "mptsas_doneq_thread_n_prop", 8);
1018 
1019 	if (mpt->m_doneq_thread_n) {
1020 		cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1021 		mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1022 
1023 		mutex_enter(&mpt->m_doneq_mutex);
1024 		mpt->m_doneq_thread_id =
1025 		    kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1026 		    * mpt->m_doneq_thread_n, KM_SLEEP);
1027 
1028 		for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1029 			cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1030 			    CV_DRIVER, NULL);
1031 			mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1032 			    MUTEX_DRIVER, NULL);
1033 			mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1034 			mpt->m_doneq_thread_id[j].flag |=
1035 			    MPTSAS_DONEQ_THREAD_ACTIVE;
1036 			mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1037 			mpt->m_doneq_thread_id[j].arg.t = j;
1038 			mpt->m_doneq_thread_id[j].threadp =
1039 			    thread_create(NULL, 0, mptsas_doneq_thread,
1040 			    &mpt->m_doneq_thread_id[j].arg,
1041 			    0, &p0, TS_RUN, minclsyspri);
1042 			mpt->m_doneq_thread_id[j].donetail =
1043 			    &mpt->m_doneq_thread_id[j].doneq;
1044 			mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1045 		}
1046 		mutex_exit(&mpt->m_doneq_mutex);
1047 		doneq_thread_create++;
1048 	}
1049 
1050 	/* Get supported interrupt types */
1051 	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
1052 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
1053 		    "failed\n");
1054 		goto fail;
1055 	}
1056 
1057 	NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
1058 
1059 	if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
1060 		/*
1061 		 * Try MSI, but fall back to FIXED
1062 		 */
1063 		if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
1064 			NDBG0(("Using MSI interrupt type"));
1065 			mpt->m_intr_type = DDI_INTR_TYPE_MSI;
1066 			goto intr_done;
1067 		}
1068 	}
1069 
1070 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1071 
1072 		if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
1073 			NDBG0(("Using FIXED interrupt type"));
1074 			mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
1075 
1076 			goto intr_done;
1077 		}
1078 
1079 		NDBG0(("FIXED interrupt registration failed"));
1080 	}
1081 
1082 	goto fail;
1083 
1084 intr_done:
1085 	intr_added++;
1086 
1087 	/* Initialize mutex used in interrupt handler */
1088 	mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1089 	    DDI_INTR_PRI(mpt->m_intr_pri));
1090 	mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1091 	    DDI_INTR_PRI(mpt->m_intr_pri));
1092 	cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1093 	cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1094 	cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1095 	cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1096 	mutex_init_done++;
1097 
1098 	/*
1099 	 * Disable hardware interrupt since we're not ready to
1100 	 * handle it yet.
1101 	 */
1102 	MPTSAS_DISABLE_INTR(mpt);
1103 
1104 	/*
1105 	 * Enable interrupts
1106 	 */
1107 	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
1108 		/* Call ddi_intr_block_enable() for MSI interrupts */
1109 		(void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
1110 	} else {
1111 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
1112 		for (i = 0; i < mpt->m_intr_cnt; i++) {
1113 			(void) ddi_intr_enable(mpt->m_htable[i]);
1114 		}
1115 	}
1116 
1117 	mutex_enter(&mpt->m_mutex);
1118 	/*
1119 	 * Initialize power management component
1120 	 */
1121 	if (mpt->m_options & MPTSAS_OPT_PM) {
1122 		if (mptsas_init_pm(mpt)) {
1123 			mutex_exit(&mpt->m_mutex);
1124 			mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1125 			    "failed");
1126 			goto fail;
1127 		}
1128 	}
1129 
1130 	/*
1131 	 * Initialize chip
1132 	 */
1133 	if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1134 		mutex_exit(&mpt->m_mutex);
1135 		mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1136 		goto fail;
1137 	}
1138 	mutex_exit(&mpt->m_mutex);
1139 
1140 	/*
1141 	 * initialize SCSI HBA transport structure
1142 	 */
1143 	hba_tran->tran_hba_private	= mpt;
1144 	hba_tran->tran_tgt_private	= NULL;
1145 
1146 	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
1147 	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;
1148 
1149 	hba_tran->tran_start		= mptsas_scsi_start;
1150 	hba_tran->tran_reset		= mptsas_scsi_reset;
1151 	hba_tran->tran_abort		= mptsas_scsi_abort;
1152 	hba_tran->tran_getcap		= mptsas_scsi_getcap;
1153 	hba_tran->tran_setcap		= mptsas_scsi_setcap;
1154 	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
1155 	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;
1156 
1157 	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
1158 	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
1159 	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;
1160 
1161 	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
1162 	hba_tran->tran_get_name		= mptsas_get_name;
1163 
1164 	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
1165 	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
1166 	hba_tran->tran_bus_reset	= NULL;
1167 
1168 	hba_tran->tran_add_eventcall	= NULL;
1169 	hba_tran->tran_get_eventcookie	= NULL;
1170 	hba_tran->tran_post_event	= NULL;
1171 	hba_tran->tran_remove_eventcall	= NULL;
1172 
1173 	hba_tran->tran_bus_config	= mptsas_bus_config;
1174 
1175 	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
1176 
1177 	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
1178 		goto fail;
1179 	}
1180 
1181 	/*
1182 	 * Register the iport for multiple port HBA
1183 	 */
1184 	/*
1185 	 * initial value of mask is 0
1186 	 */
1187 	mutex_enter(&mpt->m_mutex);
1188 	for (i = 0; i < mpt->m_num_phys; i++) {
1189 		uint8_t	phy_mask = 0x00;
1190 		char phy_mask_name[8];
1191 		uint8_t current_port;
1192 
1193 		if (mpt->m_phy_info[i].attached_devhdl == 0)
1194 			continue;
1195 
1196 		bzero(phy_mask_name, sizeof (phy_mask_name));
1197 
1198 		current_port = mpt->m_phy_info[i].port_num;
1199 
1200 		if ((mask & (1 << i)) != 0)
1201 			continue;
1202 
1203 		for (j = 0; j < mpt->m_num_phys; j++) {
1204 			if (mpt->m_phy_info[j].attached_devhdl &&
1205 			    (mpt->m_phy_info[j].port_num == current_port)) {
1206 				phy_mask |= (1 << j);
1207 			}
1208 		}
1209 		mask = mask | phy_mask;
1210 
1211 		for (j = 0; j < mpt->m_num_phys; j++) {
1212 			if ((phy_mask >> j) & 0x01) {
1213 				mpt->m_phy_info[j].phy_mask = phy_mask;
1214 			}
1215 		}
1216 
1217 		(void) sprintf(phy_mask_name, "%x", phy_mask);
1218 
1219 		mutex_exit(&mpt->m_mutex);
1220 		/*
1221 		 * register a iport
1222 		 */
1223 		(void) scsi_hba_iport_register(dip, phy_mask_name);
1224 		mutex_enter(&mpt->m_mutex);
1225 	}
1226 	mutex_exit(&mpt->m_mutex);
1227 	/*
1228 	 * register a virtual port for RAID volume always
1229 	 */
1230 	(void) scsi_hba_iport_register(dip, "v0");
1231 	/*
	 * All children of the HBA are iports.  We need the tran to be
	 * cloned, so we pass the flags to SCSA.  SCSI_HBA_TRAN_CLONE will
	 * be inherited by each iport's tran vector.
1235 	 */
1236 	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
1237 
1238 	if (scsi_hba_attach_setup(dip, &mpt->m_msg_dma_attr,
1239 	    hba_tran, tran_flags) != DDI_SUCCESS) {
1240 		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
1241 		goto fail;
1242 	}
1243 	hba_attach_setup++;
1244 
1245 	mpt->m_smptran = sas_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
1246 	ASSERT(mpt->m_smptran != NULL);
1247 	mpt->m_smptran->tran_hba_private = mpt;
1248 	mpt->m_smptran->tran_smp_start = mptsas_smp_start;
1249 	mpt->m_smptran->tran_sas_getcap = mptsas_getcap;
1250 	if (sas_hba_attach_setup(dip, mpt->m_smptran) != DDI_SUCCESS) {
1251 		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
1252 		goto fail;
1253 	}
1254 	sas_attach_setup++;
1255 	/*
1256 	 * Initialize smp hash table
1257 	 */
1258 	mptsas_hash_init(&mpt->m_active->m_smptbl);
1259 	mpt->m_smp_devhdl = 0xFFFF;
1260 
1261 	/*
1262 	 * create kmem cache for packets
1263 	 */
1264 	(void) sprintf(buf, "mptsas%d_cache", instance);
1265 	mpt->m_kmem_cache = kmem_cache_create(buf,
1266 	    sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
1267 	    mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
1268 	    NULL, (void *)mpt, NULL, 0);
1269 
1270 	if (mpt->m_kmem_cache == NULL) {
1271 		mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
1272 		goto fail;
1273 	}
1274 
1275 	/*
1276 	 * create kmem cache for extra SGL frames if SGL cannot
	 * be accommodated into the main request frame.
1278 	 */
1279 	(void) sprintf(buf, "mptsas%d_cache_frames", instance);
1280 	mpt->m_cache_frames = kmem_cache_create(buf,
1281 	    sizeof (mptsas_cache_frames_t), 8,
1282 	    mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
1283 	    NULL, (void *)mpt, NULL, 0);
1284 
1285 	if (mpt->m_cache_frames == NULL) {
1286 		mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
1287 		goto fail;
1288 	}
1289 
1290 	mpt->m_scsi_reset_delay	= ddi_prop_get_int(DDI_DEV_T_ANY,
1291 	    dip, 0, "scsi-reset-delay",	SCSI_DEFAULT_RESET_DELAY);
1292 	if (mpt->m_scsi_reset_delay == 0) {
1293 		mptsas_log(mpt, CE_NOTE,
1294 		    "scsi_reset_delay of 0 is not recommended,"
1295 		    " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1296 		mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1297 	}
1298 
1299 	/*
1300 	 * Initialize the wait and done FIFO queue
1301 	 */
1302 	mpt->m_donetail = &mpt->m_doneq;
1303 	mpt->m_waitqtail = &mpt->m_waitq;
1304 
1305 	mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1306 	mpt->m_tx_draining = 0;
1307 
1308 	/*
1309 	 * ioc cmd queue initialize
1310 	 */
1311 	mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1312 
1313 	mpt->m_dev_handle = 0xFFFF;
1314 
1315 	MPTSAS_ENABLE_INTR(mpt);
1316 
1317 	/*
1318 	 * enable event notification
1319 	 */
1320 	mutex_enter(&mpt->m_mutex);
1321 	if (mptsas_ioc_enable_event_notification(mpt)) {
1322 		mutex_exit(&mpt->m_mutex);
1323 		goto fail;
1324 	}
1325 	mutex_exit(&mpt->m_mutex);
1326 
1327 
1328 	/* Check all dma handles allocated in attach */
1329 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1330 	    != DDI_SUCCESS) ||
1331 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1332 	    != DDI_SUCCESS) ||
1333 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1334 	    != DDI_SUCCESS) ||
1335 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1336 	    != DDI_SUCCESS) ||
1337 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1338 	    != DDI_SUCCESS)) {
1339 		goto fail;
1340 	}
1341 
1342 	/* Check all acc handles allocated in attach */
1343 	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1344 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1345 	    != DDI_SUCCESS) ||
1346 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1347 	    != DDI_SUCCESS) ||
1348 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1349 	    != DDI_SUCCESS) ||
1350 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1351 	    != DDI_SUCCESS) ||
1352 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1353 	    != DDI_SUCCESS) ||
1354 	    (mptsas_check_acc_handle(mpt->m_config_handle)
1355 	    != DDI_SUCCESS)) {
1356 		goto fail;
1357 	}
1358 
1359 	/*
1360 	 * After this point, we are not going to fail the attach.
1361 	 */
1362 	/*
1363 	 * used for mptsas_watch
1364 	 */
1365 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
1366 	if (mptsas_head == NULL) {
1367 		mptsas_head = mpt;
1368 	} else {
1369 		mptsas_tail->m_next = mpt;
1370 	}
1371 	mptsas_tail = mpt;
1372 	rw_exit(&mptsas_global_rwlock);
1373 
1374 	mutex_enter(&mptsas_global_mutex);
1375 	if (mptsas_timeouts_enabled == 0) {
1376 		mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1377 		    dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1378 
1379 		mptsas_tick = mptsas_scsi_watchdog_tick *
1380 		    drv_usectohz((clock_t)1000000);
1381 
1382 		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1383 		mptsas_timeouts_enabled = 1;
1384 	}
1385 	mutex_exit(&mptsas_global_mutex);
1386 
1387 	/* Print message of HBA present */
1388 	ddi_report_dev(dip);
1389 
1390 	/* report idle status to pm framework */
1391 	if (mpt->m_options & MPTSAS_OPT_PM) {
1392 		(void) pm_idle_component(dip, 0);
1393 	}
1394 
1395 	return (DDI_SUCCESS);
1396 
1397 fail:
1398 	mptsas_log(mpt, CE_WARN, "attach failed");
1399 	mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1400 	ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1401 	if (mpt) {
1402 		mutex_enter(&mptsas_global_mutex);
1403 
1404 		if (mptsas_timeout_id && (mptsas_head == NULL)) {
1405 			timeout_id_t tid = mptsas_timeout_id;
1406 			mptsas_timeouts_enabled = 0;
1407 			mptsas_timeout_id = 0;
1408 			mutex_exit(&mptsas_global_mutex);
1409 			(void) untimeout(tid);
1410 			mutex_enter(&mptsas_global_mutex);
1411 		}
1412 		mutex_exit(&mptsas_global_mutex);
1413 		/* deallocate in reverse order */
1414 		if (mpt->m_cache_frames) {
1415 			kmem_cache_destroy(mpt->m_cache_frames);
1416 		}
1417 		if (mpt->m_kmem_cache) {
1418 			kmem_cache_destroy(mpt->m_kmem_cache);
1419 		}
1420 		if (hba_attach_setup) {
1421 			(void) scsi_hba_detach(dip);
1422 		}
1423 		if (sas_attach_setup) {
1424 			(void) sas_hba_detach(dip);
1425 		}
1426 		if (intr_added) {
1427 			mptsas_rem_intrs(mpt);
1428 		}
1429 		if (doneq_thread_create) {
1430 			mutex_enter(&mpt->m_doneq_mutex);
1431 			doneq_thread_num = mpt->m_doneq_thread_n;
1432 			for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1433 				mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1434 				mpt->m_doneq_thread_id[j].flag &=
1435 				    (~MPTSAS_DONEQ_THREAD_ACTIVE);
1436 				cv_signal(&mpt->m_doneq_thread_id[j].cv);
1437 				mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1438 			}
1439 			while (mpt->m_doneq_thread_n) {
1440 				cv_wait(&mpt->m_doneq_thread_cv,
1441 				    &mpt->m_doneq_mutex);
1442 			}
1443 			for (j = 0; j < doneq_thread_num; j++) {
1444 				cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1445 				mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1446 			}
1447 			kmem_free(mpt->m_doneq_thread_id,
1448 			    sizeof (mptsas_doneq_thread_list_t)
1449 			    * doneq_thread_num);
1450 			mutex_exit(&mpt->m_doneq_mutex);
1451 			cv_destroy(&mpt->m_doneq_thread_cv);
1452 			mutex_destroy(&mpt->m_doneq_mutex);
1453 		}
1454 		if (event_taskq_create) {
1455 			ddi_taskq_destroy(mpt->m_event_taskq);
1456 		}
1457 		if (dr_taskq_create) {
1458 			ddi_taskq_destroy(mpt->m_dr_taskq);
1459 		}
1460 		if (mutex_init_done) {
1461 			mutex_destroy(&mpt->m_tx_waitq_mutex);
1462 			mutex_destroy(&mpt->m_mutex);
1463 			cv_destroy(&mpt->m_cv);
1464 			cv_destroy(&mpt->m_passthru_cv);
1465 			cv_destroy(&mpt->m_fw_cv);
1466 			cv_destroy(&mpt->m_config_cv);
1467 		}
1468 		mptsas_free_handshake_msg(mpt);
1469 		mptsas_hba_fini(mpt);
1470 		if (map_setup) {
1471 			mptsas_cfg_fini(mpt);
1472 		}
1473 		if (config_setup) {
1474 			pci_config_teardown(&mpt->m_config_handle);
1475 		}
1476 		if (mpt->m_tran) {
1477 			scsi_hba_tran_free(mpt->m_tran);
1478 			mpt->m_tran = NULL;
1479 		}
1480 		if (mpt->m_smptran) {
1481 			sas_hba_tran_free(mpt->m_smptran);
1482 			mpt->m_smptran = NULL;
1483 		}
1484 		mptsas_fm_fini(mpt);
1485 		ddi_soft_state_free(mptsas_state, instance);
1486 		ddi_prop_remove_all(dip);
1487 	}
1488 	return (DDI_FAILURE);
1489 }
1490 
1491 static int
1492 mptsas_suspend(dev_info_t *devi)
1493 {
1494 	mptsas_t	*mpt, *g;
1495 	scsi_hba_tran_t	*tran;
1496 
1497 	if (scsi_hba_iport_unit_address(devi)) {
1498 		return (DDI_SUCCESS);
1499 	}
1500 
1501 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1502 		return (DDI_SUCCESS);
1503 
1504 	mpt = TRAN2MPT(tran);
1505 	if (!mpt) {
1506 		return (DDI_SUCCESS);
1507 	}
1508 
1509 	mutex_enter(&mpt->m_mutex);
1510 
1511 	if (mpt->m_suspended++) {
1512 		mutex_exit(&mpt->m_mutex);
1513 		return (DDI_SUCCESS);
1514 	}
1515 
1516 	/*
1517 	 * Cancel timeout threads for this mpt
1518 	 */
1519 	if (mpt->m_quiesce_timeid) {
1520 		timeout_id_t tid = mpt->m_quiesce_timeid;
1521 		mpt->m_quiesce_timeid = 0;
1522 		mutex_exit(&mpt->m_mutex);
1523 		(void) untimeout(tid);
1524 		mutex_enter(&mpt->m_mutex);
1525 	}
1526 
1527 	if (mpt->m_restart_cmd_timeid) {
1528 		timeout_id_t tid = mpt->m_restart_cmd_timeid;
1529 		mpt->m_restart_cmd_timeid = 0;
1530 		mutex_exit(&mpt->m_mutex);
1531 		(void) untimeout(tid);
1532 		mutex_enter(&mpt->m_mutex);
1533 	}
1534 
1535 	if (mpt->m_pm_timeid != 0) {
1536 		timeout_id_t tid = mpt->m_pm_timeid;
1537 		mpt->m_pm_timeid = 0;
1538 		mutex_exit(&mpt->m_mutex);
1539 		(void) untimeout(tid);
1540 		/*
1541 		 * Report idle status for last ioctl since
1542 		 * calls to pm_busy_component(9F) are stacked.
1543 		 */
1544 		(void) pm_idle_component(mpt->m_dip, 0);
1545 		mutex_enter(&mpt->m_mutex);
1546 	}
1547 	mutex_exit(&mpt->m_mutex);
1548 
1549 	/*
1550 	 * Cancel watch threads if all mpts suspended
1551 	 */
1552 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
1553 	for (g = mptsas_head; g != NULL; g = g->m_next) {
1554 		if (!g->m_suspended)
1555 			break;
1556 	}
1557 	rw_exit(&mptsas_global_rwlock);
1558 
1559 	mutex_enter(&mptsas_global_mutex);
1560 	if (g == NULL) {
1561 		timeout_id_t tid;
1562 
1563 		mptsas_timeouts_enabled = 0;
1564 		if (mptsas_timeout_id) {
1565 			tid = mptsas_timeout_id;
1566 			mptsas_timeout_id = 0;
1567 			mutex_exit(&mptsas_global_mutex);
1568 			(void) untimeout(tid);
1569 			mutex_enter(&mptsas_global_mutex);
1570 		}
1571 		if (mptsas_reset_watch) {
1572 			tid = mptsas_reset_watch;
1573 			mptsas_reset_watch = 0;
1574 			mutex_exit(&mptsas_global_mutex);
1575 			(void) untimeout(tid);
1576 			mutex_enter(&mptsas_global_mutex);
1577 		}
1578 	}
1579 	mutex_exit(&mptsas_global_mutex);
1580 
1581 	mutex_enter(&mpt->m_mutex);
1582 
1583 	/*
1584 	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
1585 	 */
1586 	if ((mpt->m_options & MPTSAS_OPT_PM) &&
1587 	    (mpt->m_power_level != PM_LEVEL_D0)) {
1588 		mutex_exit(&mpt->m_mutex);
1589 		return (DDI_SUCCESS);
1590 	}
1591 
1592 	/* Disable HBA interrupts in hardware */
1593 	MPTSAS_DISABLE_INTR(mpt);
1594 
1595 	mutex_exit(&mpt->m_mutex);
1596 
1597 	/* drain the taskq */
1598 	ddi_taskq_wait(mpt->m_event_taskq);
1599 	ddi_taskq_wait(mpt->m_dr_taskq);
1600 
1601 	return (DDI_SUCCESS);
1602 }
1603 
1604 /*
1605  * quiesce(9E) entry point.
1606  *
1607  * This function is called when the system is single-threaded at high
1608  * PIL with preemption disabled. Therefore, this function must not be
1609  * blocked.
1610  *
1611  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1612  * DDI_FAILURE indicates an error condition and should almost never happen.
1613  */
1614 #ifndef	__sparc
1615 static int
1616 mptsas_quiesce(dev_info_t *devi)
1617 {
1618 	mptsas_t	*mpt;
1619 	scsi_hba_tran_t *tran;
1620 
1621 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1622 		return (DDI_SUCCESS);
1623 
1624 	if ((mpt = TRAN2MPT(tran)) == NULL)
1625 		return (DDI_SUCCESS);
1626 
1627 	/* Disable HBA interrupts in hardware */
1628 	MPTSAS_DISABLE_INTR(mpt);
1629 
1630 	return (DDI_SUCCESS);
1631 }
1632 #endif	/* __sparc */
1633 
1634 /*
1635  * detach(9E).	Remove all device allocations and system resources;
1636  * disable device interrupts.
1637  * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1638  */
1639 static int
1640 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1641 {
1642 	/* CONSTCOND */
1643 	ASSERT(NO_COMPETING_THREADS);
1644 	NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1645 
1646 	switch (cmd) {
1647 	case DDI_DETACH:
1648 		return (mptsas_do_detach(devi));
1649 
1650 	case DDI_SUSPEND:
1651 		return (mptsas_suspend(devi));
1652 
1653 	default:
1654 		return (DDI_FAILURE);
1655 	}
1656 	/* NOTREACHED */
1657 }
1658 
1659 static int
1660 mptsas_do_detach(dev_info_t *dip)
1661 {
1662 	mptsas_t	*mpt, *m;
1663 	scsi_hba_tran_t	*tran;
1664 	mptsas_slots_t	*active;
1665 	int		circ = 0;
1666 	int		circ1 = 0;
1667 	mdi_pathinfo_t	*pip = NULL;
1668 	int		i;
1669 	int		doneq_thread_num = 0;
1670 
1671 	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));
1672 
1673 	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
1674 		return (DDI_FAILURE);
1675 
1676 	mpt = TRAN2MPT(tran);
1677 	if (!mpt) {
1678 		return (DDI_FAILURE);
1679 	}
1680 	/*
1681 	 * Still have pathinfo child, should not detach mpt driver
1682 	 */
1683 	if (scsi_hba_iport_unit_address(dip)) {
1684 		if (mpt->m_mpxio_enable) {
1685 			/*
1686 			 * MPxIO enabled for the iport
1687 			 */
1688 			ndi_devi_enter(scsi_vhci_dip, &circ1);
1689 			ndi_devi_enter(dip, &circ);
1690 			while (pip = mdi_get_next_client_path(dip, NULL)) {
1691 				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
1692 					continue;
1693 				}
1694 				ndi_devi_exit(dip, circ);
1695 				ndi_devi_exit(scsi_vhci_dip, circ1);
1696 				NDBG12(("detach failed because of "
1697 				    "outstanding path info"));
1698 				return (DDI_FAILURE);
1699 			}
1700 			ndi_devi_exit(dip, circ);
1701 			ndi_devi_exit(scsi_vhci_dip, circ1);
1702 			(void) mdi_phci_unregister(dip, 0);
1703 		}
1704 
1705 		ddi_prop_remove_all(dip);
1706 
1707 		return (DDI_SUCCESS);
1708 	}
1709 
1710 	/* Make sure power level is D0 before accessing registers */
1711 	if (mpt->m_options & MPTSAS_OPT_PM) {
1712 		(void) pm_busy_component(dip, 0);
1713 		if (mpt->m_power_level != PM_LEVEL_D0) {
1714 			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
1715 			    DDI_SUCCESS) {
1716 				mptsas_log(mpt, CE_WARN,
1717 				    "mptsas%d: Raise power request failed.",
1718 				    mpt->m_instance);
1719 				(void) pm_idle_component(dip, 0);
1720 				return (DDI_FAILURE);
1721 			}
1722 		}
1723 	}
1724 
1725 	mutex_enter(&mpt->m_mutex);
1726 	MPTSAS_DISABLE_INTR(mpt);
1727 	mutex_exit(&mpt->m_mutex);
1728 	mptsas_rem_intrs(mpt);
1729 	ddi_taskq_destroy(mpt->m_event_taskq);
1730 	ddi_taskq_destroy(mpt->m_dr_taskq);
1731 
1732 	if (mpt->m_doneq_thread_n) {
1733 		mutex_enter(&mpt->m_doneq_mutex);
1734 		doneq_thread_num = mpt->m_doneq_thread_n;
1735 		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
1736 			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
1737 			mpt->m_doneq_thread_id[i].flag &=
1738 			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
1739 			cv_signal(&mpt->m_doneq_thread_id[i].cv);
1740 			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
1741 		}
1742 		while (mpt->m_doneq_thread_n) {
1743 			cv_wait(&mpt->m_doneq_thread_cv,
1744 			    &mpt->m_doneq_mutex);
1745 		}
1746 		for (i = 0;  i < doneq_thread_num; i++) {
1747 			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
1748 			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
1749 		}
1750 		kmem_free(mpt->m_doneq_thread_id,
1751 		    sizeof (mptsas_doneq_thread_list_t)
1752 		    * doneq_thread_num);
1753 		mutex_exit(&mpt->m_doneq_mutex);
1754 		cv_destroy(&mpt->m_doneq_thread_cv);
1755 		mutex_destroy(&mpt->m_doneq_mutex);
1756 	}
1757 
1758 	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);
1759 
1760 	/*
1761 	 * Remove device instance from the global linked list
1762 	 */
1763 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
1764 	if (mptsas_head == mpt) {
1765 		m = mptsas_head = mpt->m_next;
1766 	} else {
1767 		for (m = mptsas_head; m != NULL; m = m->m_next) {
1768 			if (m->m_next == mpt) {
1769 				m->m_next = mpt->m_next;
1770 				break;
1771 			}
1772 		}
1773 		if (m == NULL) {
1774 			mptsas_log(mpt, CE_PANIC, "Not in softc list!");
1775 		}
1776 	}
1777 
1778 	if (mptsas_tail == mpt) {
1779 		mptsas_tail = m;
1780 	}
1781 	rw_exit(&mptsas_global_rwlock);
1782 
1783 	/*
1784 	 * Cancel timeout threads for this mpt
1785 	 */
1786 	mutex_enter(&mpt->m_mutex);
1787 	if (mpt->m_quiesce_timeid) {
1788 		timeout_id_t tid = mpt->m_quiesce_timeid;
1789 		mpt->m_quiesce_timeid = 0;
1790 		mutex_exit(&mpt->m_mutex);
1791 		(void) untimeout(tid);
1792 		mutex_enter(&mpt->m_mutex);
1793 	}
1794 
1795 	if (mpt->m_restart_cmd_timeid) {
1796 		timeout_id_t tid = mpt->m_restart_cmd_timeid;
1797 		mpt->m_restart_cmd_timeid = 0;
1798 		mutex_exit(&mpt->m_mutex);
1799 		(void) untimeout(tid);
1800 		mutex_enter(&mpt->m_mutex);
1801 	}
1802 
1803 	if (mpt->m_pm_timeid != 0) {
1804 		timeout_id_t tid = mpt->m_pm_timeid;
1805 		mpt->m_pm_timeid = 0;
1806 		mutex_exit(&mpt->m_mutex);
1807 		(void) untimeout(tid);
1808 		/*
1809 		 * Report idle status for last ioctl since
1810 		 * calls to pm_busy_component(9F) are stacked.
1811 		 */
1812 		(void) pm_idle_component(mpt->m_dip, 0);
1813 		mutex_enter(&mpt->m_mutex);
1814 	}
1815 	mutex_exit(&mpt->m_mutex);
1816 
1817 	/*
1818 	 * last mpt? ... if active, CANCEL watch threads.
1819 	 */
1820 	mutex_enter(&mptsas_global_mutex);
1821 	if (mptsas_head == NULL) {
1822 		timeout_id_t tid;
1823 		/*
1824 		 * Clear mptsas_timeouts_enable so that the watch thread
1825 		 * gets restarted on DDI_ATTACH
1826 		 */
1827 		mptsas_timeouts_enabled = 0;
1828 		if (mptsas_timeout_id) {
1829 			tid = mptsas_timeout_id;
1830 			mptsas_timeout_id = 0;
1831 			mutex_exit(&mptsas_global_mutex);
1832 			(void) untimeout(tid);
1833 			mutex_enter(&mptsas_global_mutex);
1834 		}
1835 		if (mptsas_reset_watch) {
1836 			tid = mptsas_reset_watch;
1837 			mptsas_reset_watch = 0;
1838 			mutex_exit(&mptsas_global_mutex);
1839 			(void) untimeout(tid);
1840 			mutex_enter(&mptsas_global_mutex);
1841 		}
1842 	}
1843 	mutex_exit(&mptsas_global_mutex);
1844 
1845 	/*
1846 	 * Delete nt_active.
1847 	 */
1848 	active = mpt->m_active;
1849 	mutex_enter(&mpt->m_mutex);
1850 	mptsas_hash_uninit(&active->m_smptbl, sizeof (mptsas_smp_t));
1851 	mutex_exit(&mpt->m_mutex);
1852 
1853 	if (active) {
1854 		kmem_free(active, active->m_size);
1855 		mpt->m_active = NULL;
1856 	}
1857 
1858 	/* deallocate everything that was allocated in mptsas_attach */
1859 	mptsas_fm_fini(mpt);
1860 	kmem_cache_destroy(mpt->m_cache_frames);
1861 	kmem_cache_destroy(mpt->m_kmem_cache);
1862 
1863 	(void) scsi_hba_detach(dip);
1864 	(void) sas_hba_detach(dip);
1865 	mptsas_free_handshake_msg(mpt);
1866 	mptsas_hba_fini(mpt);
1867 	mptsas_cfg_fini(mpt);
1868 
1869 	/* Lower the power informing PM Framework */
1870 	if (mpt->m_options & MPTSAS_OPT_PM) {
1871 		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
1872 			mptsas_log(mpt, CE_WARN,
1873 			    "!mptsas%d: Lower power request failed "
1874 			    "during detach, ignoring.",
1875 			    mpt->m_instance);
1876 	}
1877 
1878 	mutex_destroy(&mpt->m_tx_waitq_mutex);
1879 	mutex_destroy(&mpt->m_mutex);
1880 	cv_destroy(&mpt->m_cv);
1881 	cv_destroy(&mpt->m_passthru_cv);
1882 	cv_destroy(&mpt->m_fw_cv);
1883 	cv_destroy(&mpt->m_config_cv);
1884 
1885 	pci_config_teardown(&mpt->m_config_handle);
1886 	if (mpt->m_tran) {
1887 		scsi_hba_tran_free(mpt->m_tran);
1888 		mpt->m_tran = NULL;
1889 	}
1890 
1891 	if (mpt->m_smptran) {
1892 		sas_hba_tran_free(mpt->m_smptran);
1893 		mpt->m_smptran = NULL;
1894 	}
1895 
1896 	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
1897 	ddi_prop_remove_all(dip);
1898 
1899 	return (DDI_SUCCESS);
1900 }
1901 
1902 static int
1903 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
1904 {
1905 	ddi_dma_attr_t		task_dma_attrs;
1906 	ddi_dma_cookie_t	tmp_dma_cookie;
1907 	size_t			alloc_len;
1908 	uint_t			ncookie;
1909 
1910 	/* allocate Task Management ddi_dma resources */
1911 	task_dma_attrs = mpt->m_msg_dma_attr;
1912 	task_dma_attrs.dma_attr_sgllen = 1;
1913 	task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
1914 
1915 	if (ddi_dma_alloc_handle(mpt->m_dip, &task_dma_attrs,
1916 	    DDI_DMA_SLEEP, NULL, &mpt->m_hshk_dma_hdl) != DDI_SUCCESS) {
1917 		mpt->m_hshk_dma_hdl = NULL;
1918 		return (DDI_FAILURE);
1919 	}
1920 
1921 	if (ddi_dma_mem_alloc(mpt->m_hshk_dma_hdl, alloc_size,
1922 	    &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1923 	    &mpt->m_hshk_memp, &alloc_len, &mpt->m_hshk_acc_hdl)
1924 	    != DDI_SUCCESS) {
1925 		ddi_dma_free_handle(&mpt->m_hshk_dma_hdl);
1926 		mpt->m_hshk_dma_hdl = NULL;
1927 		return (DDI_FAILURE);
1928 	}
1929 
1930 	if (ddi_dma_addr_bind_handle(mpt->m_hshk_dma_hdl, NULL,
1931 	    mpt->m_hshk_memp, alloc_len, (DDI_DMA_RDWR | DDI_DMA_CONSISTENT),
1932 	    DDI_DMA_SLEEP, NULL, &tmp_dma_cookie, &ncookie)
1933 	    != DDI_DMA_MAPPED) {
1934 		(void) ddi_dma_mem_free(&mpt->m_hshk_acc_hdl);
1935 		ddi_dma_free_handle(&mpt->m_hshk_dma_hdl);
1936 		mpt->m_hshk_dma_hdl = NULL;
1937 		return (DDI_FAILURE);
1938 	}
1939 	mpt->m_hshk_dma_size = alloc_size;
1940 	return (DDI_SUCCESS);
1941 }
1942 
1943 static void
1944 mptsas_free_handshake_msg(mptsas_t *mpt)
1945 {
1946 	if (mpt->m_hshk_dma_hdl != NULL) {
1947 		(void) ddi_dma_unbind_handle(mpt->m_hshk_dma_hdl);
1948 		(void) ddi_dma_mem_free(&mpt->m_hshk_acc_hdl);
1949 		ddi_dma_free_handle(&mpt->m_hshk_dma_hdl);
1950 		mpt->m_hshk_dma_hdl = NULL;
1951 		mpt->m_hshk_dma_size = 0;
1952 	}
1953 }
1954 
/*
 * power(9E) entry point: transition the HBA between full power
 * (PM_LEVEL_D0) and off (PM_LEVEL_D3).  Only the HBA node itself is
 * power-managed; iport children return DDI_SUCCESS untouched.
 * Returns DDI_FAILURE if the device is busy, the softstate is
 * missing, the IOC cannot be brought back up, or the level is
 * unrecognized.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	/* Only the HBA node is power-managed, not the iports. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}

	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset
		 * (3000 polls x 10ms delay per iteration), reading the
		 * IOC state from the Doorbell register each time.
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2026 
2027 /*
2028  * Initialize configuration space and figure out which
2029  * chip and revison of the chip the mpt driver is using.
2030  */
2031 int
2032 mptsas_config_space_init(mptsas_t *mpt)
2033 {
2034 	ushort_t	caps_ptr, cap, cap_count;
2035 
2036 	NDBG0(("mptsas_config_space_init"));
2037 
2038 	mptsas_setup_cmd_reg(mpt);
2039 
2040 	/*
2041 	 * Get the chip device id:
2042 	 */
2043 	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);
2044 
2045 	/*
2046 	 * Save the revision.
2047 	 */
2048 	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);
2049 
2050 	/*
2051 	 * Save the SubSystem Vendor and Device IDs
2052 	 */
2053 	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
2054 	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);
2055 
2056 	/*
2057 	 * Set the latency timer to 0x40 as specified by the upa -> pci
2058 	 * bridge chip design team.  This may be done by the sparc pci
2059 	 * bus nexus driver, but the driver should make sure the latency
2060 	 * timer is correct for performance reasons.
2061 	 */
2062 	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
2063 	    MPTSAS_LATENCY_TIMER);
2064 
2065 	/*
2066 	 * Check if capabilities list is supported and if so,
2067 	 * get initial capabilities pointer and clear bits 0,1.
2068 	 */
2069 	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
2070 	    & PCI_STAT_CAP) {
2071 		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
2072 		    PCI_CONF_CAP_PTR), 4);
2073 	} else {
2074 		caps_ptr = PCI_CAP_NEXT_PTR_NULL;
2075 	}
2076 
2077 	/*
2078 	 * Walk capabilities if supported.
2079 	 */
2080 	for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
2081 
2082 		/*
2083 		 * Check that we haven't exceeded the maximum number of
2084 		 * capabilities and that the pointer is in a valid range.
2085 		 */
2086 		if (++cap_count > 48) {
2087 			mptsas_log(mpt, CE_WARN,
2088 			    "too many device capabilities.\n");
2089 			return (FALSE);
2090 		}
2091 		if (caps_ptr < 64) {
2092 			mptsas_log(mpt, CE_WARN,
2093 			    "capabilities pointer 0x%x out of range.\n",
2094 			    caps_ptr);
2095 			return (FALSE);
2096 		}
2097 
2098 		/*
2099 		 * Get next capability and check that it is valid.
2100 		 * For now, we only support power management.
2101 		 */
2102 		cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
2103 		switch (cap) {
2104 			case PCI_CAP_ID_PM:
2105 				mptsas_log(mpt, CE_NOTE,
2106 				    "?mptsas%d supports power management.\n",
2107 				    mpt->m_instance);
2108 				mpt->m_options |= MPTSAS_OPT_PM;
2109 
2110 				/* Save PMCSR offset */
2111 				mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
2112 				break;
2113 
2114 			/*
2115 			 * 0x5 is Message signaled interrupts and 0x7
2116 			 * is pci-x capable.  Both are unsupported for now
2117 			 * but supported by the 1030 chip so we don't
2118 			 * need to keep printing out the notice.
2119 			 * 0x10 is PCI-E support (1064E/1068E)
2120 			 * 0x11 is MSIX supported by the 1064/1068
2121 			 */
2122 			case 0x5:
2123 			case 0x7:
2124 			case 0x10:
2125 			case 0x11:
2126 				break;
2127 			default:
2128 				mptsas_log(mpt, CE_NOTE,
2129 				    "?mptsas%d unrecognized capability "
2130 				    "0x%x.\n", mpt->m_instance, cap);
2131 			break;
2132 		}
2133 
2134 		/*
2135 		 * Get next capabilities pointer and clear bits 0,1.
2136 		 */
2137 		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
2138 		    (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
2139 	}
2140 
2141 	return (TRUE);
2142 }
2143 
2144 static void
2145 mptsas_setup_cmd_reg(mptsas_t *mpt)
2146 {
2147 	ushort_t	cmdreg;
2148 
2149 	/*
2150 	 * Set the command register to the needed values.
2151 	 */
2152 	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2153 	cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2154 	    PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2155 	cmdreg &= ~PCI_COMM_IO;
2156 	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2157 }
2158 
2159 static void
2160 mptsas_disable_bus_master(mptsas_t *mpt)
2161 {
2162 	ushort_t	cmdreg;
2163 
2164 	/*
2165 	 * Clear the master enable bit in the PCI command register.
2166 	 * This prevents any bus mastering activity like DMA.
2167 	 */
2168 	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2169 	cmdreg &= ~PCI_COMM_ME;
2170 	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2171 }
2172 
2173 int
2174 mptsas_passthru_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2175 {
2176 	ddi_dma_attr_t	attrs;
2177 	uint_t		ncookie;
2178 	size_t		alloc_len;
2179 
2180 	attrs = mpt->m_msg_dma_attr;
2181 	attrs.dma_attr_sgllen = 1;
2182 
2183 	ASSERT(dma_statep != NULL);
2184 
2185 	if (ddi_dma_alloc_handle(mpt->m_dip, &attrs,
2186 	    DDI_DMA_SLEEP, NULL, &dma_statep->handle) != DDI_SUCCESS) {
2187 		mptsas_log(mpt, CE_WARN,
2188 		    "unable to allocate dma handle.");
2189 		return (DDI_FAILURE);
2190 	}
2191 
2192 	if (ddi_dma_mem_alloc(dma_statep->handle, dma_statep->size,
2193 	    &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2194 	    &dma_statep->memp, &alloc_len, &dma_statep->accessp) !=
2195 	    DDI_SUCCESS) {
2196 		ddi_dma_free_handle(&dma_statep->handle);
2197 		dma_statep->handle = NULL;
2198 		mptsas_log(mpt, CE_WARN,
2199 		    "unable to allocate memory for dma xfer.");
2200 		return (DDI_FAILURE);
2201 	}
2202 
2203 	if (ddi_dma_addr_bind_handle(dma_statep->handle, NULL, dma_statep->memp,
2204 	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2205 	    NULL, &dma_statep->cookie, &ncookie) != DDI_DMA_MAPPED) {
2206 		ddi_dma_mem_free(&dma_statep->accessp);
2207 		dma_statep->accessp = NULL;
2208 		ddi_dma_free_handle(&dma_statep->handle);
2209 		dma_statep->handle = NULL;
2210 		mptsas_log(mpt, CE_WARN, "unable to bind DMA resources.");
2211 		return (DDI_FAILURE);
2212 	}
2213 	return (DDI_SUCCESS);
2214 }
2215 
2216 void
2217 mptsas_passthru_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2218 {
2219 	ASSERT(dma_statep != NULL);
2220 	if (dma_statep->handle != NULL) {
2221 		(void) ddi_dma_unbind_handle(dma_statep->handle);
2222 		(void) ddi_dma_mem_free(&dma_statep->accessp);
2223 		ddi_dma_free_handle(&dma_statep->handle);
2224 	}
2225 }
2226 
2227 int
2228 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2229 {
2230 	ddi_dma_attr_t		attrs;
2231 	ddi_dma_handle_t	dma_handle;
2232 	caddr_t			memp;
2233 	uint_t			ncookie;
2234 	ddi_dma_cookie_t	cookie;
2235 	ddi_acc_handle_t	accessp;
2236 	size_t			alloc_len;
2237 	int			rval;
2238 
2239 	ASSERT(mutex_owned(&mpt->m_mutex));
2240 
2241 	attrs = mpt->m_msg_dma_attr;
2242 	attrs.dma_attr_sgllen = 1;
2243 	attrs.dma_attr_granular = size;
2244 
2245 	if (ddi_dma_alloc_handle(mpt->m_dip, &attrs,
2246 	    DDI_DMA_SLEEP, NULL, &dma_handle) != DDI_SUCCESS) {
2247 		mptsas_log(mpt, CE_WARN,
2248 		    "unable to allocate dma handle.");
2249 		return (DDI_FAILURE);
2250 	}
2251 
2252 	if (ddi_dma_mem_alloc(dma_handle, size,
2253 	    &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2254 	    &memp, &alloc_len, &accessp) != DDI_SUCCESS) {
2255 		ddi_dma_free_handle(&dma_handle);
2256 		mptsas_log(mpt, CE_WARN,
2257 		    "unable to allocate request structure.");
2258 		return (DDI_FAILURE);
2259 	}
2260 
2261 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, memp,
2262 	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2263 	    NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2264 		(void) ddi_dma_mem_free(&accessp);
2265 		ddi_dma_free_handle(&dma_handle);
2266 		mptsas_log(mpt, CE_WARN, "unable to bind DMA resources.");
2267 		return (DDI_FAILURE);
2268 	}
2269 
2270 	rval = (*callback) (mpt, memp, var, accessp);
2271 
2272 	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2273 	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2274 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2275 		rval = DDI_FAILURE;
2276 	}
2277 
2278 	if (dma_handle != NULL) {
2279 		(void) ddi_dma_unbind_handle(dma_handle);
2280 		(void) ddi_dma_mem_free(&accessp);
2281 		ddi_dma_free_handle(&dma_handle);
2282 	}
2283 
2284 	return (rval);
2285 
2286 }
2287 
2288 static int
2289 mptsas_alloc_request_frames(mptsas_t *mpt)
2290 {
2291 	ddi_dma_attr_t		frame_dma_attrs;
2292 	caddr_t			memp;
2293 	uint_t			ncookie;
2294 	ddi_dma_cookie_t	cookie;
2295 	size_t			alloc_len;
2296 	size_t			mem_size;
2297 
2298 	/*
2299 	 * The size of the request frame pool is:
2300 	 *   Number of Request Frames * Request Frame Size
2301 	 */
2302 	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2303 
2304 	/*
2305 	 * set the DMA attributes.  System Request Message Frames must be
2306 	 * aligned on a 16-byte boundry.
2307 	 */
2308 	frame_dma_attrs = mpt->m_msg_dma_attr;
2309 	frame_dma_attrs.dma_attr_align = 16;
2310 	frame_dma_attrs.dma_attr_sgllen = 1;
2311 
2312 	/*
2313 	 * allocate the request frame pool.
2314 	 */
2315 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2316 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_req_frame_hdl) != DDI_SUCCESS) {
2317 		mptsas_log(mpt, CE_WARN,
2318 		    "Unable to allocate dma handle.");
2319 		return (DDI_FAILURE);
2320 	}
2321 
2322 	if (ddi_dma_mem_alloc(mpt->m_dma_req_frame_hdl,
2323 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2324 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_req_frame_hdl)
2325 	    != DDI_SUCCESS) {
2326 		ddi_dma_free_handle(&mpt->m_dma_req_frame_hdl);
2327 		mpt->m_dma_req_frame_hdl = NULL;
2328 		mptsas_log(mpt, CE_WARN,
2329 		    "Unable to allocate request frames.");
2330 		return (DDI_FAILURE);
2331 	}
2332 
2333 	if (ddi_dma_addr_bind_handle(mpt->m_dma_req_frame_hdl, NULL,
2334 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2335 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2336 		(void) ddi_dma_mem_free(&mpt->m_acc_req_frame_hdl);
2337 		ddi_dma_free_handle(&mpt->m_dma_req_frame_hdl);
2338 		mpt->m_dma_req_frame_hdl = NULL;
2339 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2340 		return (DDI_FAILURE);
2341 	}
2342 
2343 	/*
2344 	 * Store the request frame memory address.  This chip uses this
2345 	 * address to dma to and from the driver's frame.  The second
2346 	 * address is the address mpt uses to fill in the frame.
2347 	 */
2348 	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2349 	mpt->m_req_frame = memp;
2350 
2351 	/*
2352 	 * Clear the request frame pool.
2353 	 */
2354 	bzero(mpt->m_req_frame, alloc_len);
2355 
2356 	return (DDI_SUCCESS);
2357 }
2358 
2359 static int
2360 mptsas_alloc_reply_frames(mptsas_t *mpt)
2361 {
2362 	ddi_dma_attr_t		frame_dma_attrs;
2363 	caddr_t			memp;
2364 	uint_t			ncookie;
2365 	ddi_dma_cookie_t	cookie;
2366 	size_t			alloc_len;
2367 	size_t			mem_size;
2368 
2369 	/*
2370 	 * The size of the reply frame pool is:
2371 	 *   Number of Reply Frames * Reply Frame Size
2372 	 */
2373 	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2374 
2375 	/*
2376 	 * set the DMA attributes.   System Reply Message Frames must be
2377 	 * aligned on a 4-byte boundry.  This is the default.
2378 	 */
2379 	frame_dma_attrs = mpt->m_msg_dma_attr;
2380 	frame_dma_attrs.dma_attr_sgllen = 1;
2381 
2382 	/*
2383 	 * allocate the reply frame pool
2384 	 */
2385 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2386 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_reply_frame_hdl) != DDI_SUCCESS) {
2387 		mptsas_log(mpt, CE_WARN,
2388 		    "Unable to allocate dma handle.");
2389 		return (DDI_FAILURE);
2390 	}
2391 
2392 	if (ddi_dma_mem_alloc(mpt->m_dma_reply_frame_hdl,
2393 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2394 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_reply_frame_hdl)
2395 	    != DDI_SUCCESS) {
2396 		ddi_dma_free_handle(&mpt->m_dma_reply_frame_hdl);
2397 		mpt->m_dma_reply_frame_hdl = NULL;
2398 		mptsas_log(mpt, CE_WARN,
2399 		    "Unable to allocate reply frames.");
2400 		return (DDI_FAILURE);
2401 	}
2402 
2403 	if (ddi_dma_addr_bind_handle(mpt->m_dma_reply_frame_hdl, NULL,
2404 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2405 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2406 		(void) ddi_dma_mem_free(&mpt->m_acc_reply_frame_hdl);
2407 		ddi_dma_free_handle(&mpt->m_dma_reply_frame_hdl);
2408 		mpt->m_dma_reply_frame_hdl = NULL;
2409 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2410 		return (DDI_FAILURE);
2411 	}
2412 
2413 	/*
2414 	 * Store the reply frame memory address.  This chip uses this
2415 	 * address to dma to and from the driver's frame.  The second
2416 	 * address is the address mpt uses to process the frame.
2417 	 */
2418 	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2419 	mpt->m_reply_frame = memp;
2420 
2421 	/*
2422 	 * Clear the reply frame pool.
2423 	 */
2424 	bzero(mpt->m_reply_frame, alloc_len);
2425 
2426 	return (DDI_SUCCESS);
2427 }
2428 
2429 static int
2430 mptsas_alloc_free_queue(mptsas_t *mpt)
2431 {
2432 	ddi_dma_attr_t		frame_dma_attrs;
2433 	caddr_t			memp;
2434 	uint_t			ncookie;
2435 	ddi_dma_cookie_t	cookie;
2436 	size_t			alloc_len;
2437 	size_t			mem_size;
2438 
2439 	/*
2440 	 * The reply free queue size is:
2441 	 *   Reply Free Queue Depth * 4
2442 	 * The "4" is the size of one 32 bit address (low part of 64-bit
2443 	 *   address)
2444 	 */
2445 	mem_size = mpt->m_free_queue_depth * 4;
2446 
2447 	/*
2448 	 * set the DMA attributes  The Reply Free Queue must be aligned on a
2449 	 * 16-byte boundry.
2450 	 */
2451 	frame_dma_attrs = mpt->m_msg_dma_attr;
2452 	frame_dma_attrs.dma_attr_align = 16;
2453 	frame_dma_attrs.dma_attr_sgllen = 1;
2454 
2455 	/*
2456 	 * allocate the reply free queue
2457 	 */
2458 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2459 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_free_queue_hdl) != DDI_SUCCESS) {
2460 		mptsas_log(mpt, CE_WARN,
2461 		    "Unable to allocate dma handle.");
2462 		return (DDI_FAILURE);
2463 	}
2464 
2465 	if (ddi_dma_mem_alloc(mpt->m_dma_free_queue_hdl,
2466 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2467 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_free_queue_hdl)
2468 	    != DDI_SUCCESS) {
2469 		ddi_dma_free_handle(&mpt->m_dma_free_queue_hdl);
2470 		mpt->m_dma_free_queue_hdl = NULL;
2471 		mptsas_log(mpt, CE_WARN,
2472 		    "Unable to allocate free queue.");
2473 		return (DDI_FAILURE);
2474 	}
2475 
2476 	if (ddi_dma_addr_bind_handle(mpt->m_dma_free_queue_hdl, NULL,
2477 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2478 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2479 		(void) ddi_dma_mem_free(&mpt->m_acc_free_queue_hdl);
2480 		ddi_dma_free_handle(&mpt->m_dma_free_queue_hdl);
2481 		mpt->m_dma_free_queue_hdl = NULL;
2482 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2483 		return (DDI_FAILURE);
2484 	}
2485 
2486 	/*
2487 	 * Store the reply free queue memory address.  This chip uses this
2488 	 * address to read from the reply free queue.  The second address
2489 	 * is the address mpt uses to manage the queue.
2490 	 */
2491 	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2492 	mpt->m_free_queue = memp;
2493 
2494 	/*
2495 	 * Clear the reply free queue memory.
2496 	 */
2497 	bzero(mpt->m_free_queue, alloc_len);
2498 
2499 	return (DDI_SUCCESS);
2500 }
2501 
2502 static int
2503 mptsas_alloc_post_queue(mptsas_t *mpt)
2504 {
2505 	ddi_dma_attr_t		frame_dma_attrs;
2506 	caddr_t			memp;
2507 	uint_t			ncookie;
2508 	ddi_dma_cookie_t	cookie;
2509 	size_t			alloc_len;
2510 	size_t			mem_size;
2511 
2512 	/*
2513 	 * The reply descriptor post queue size is:
2514 	 *   Reply Descriptor Post Queue Depth * 8
2515 	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2516 	 */
2517 	mem_size = mpt->m_post_queue_depth * 8;
2518 
2519 	/*
2520 	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
2521 	 * aligned on a 16-byte boundry.
2522 	 */
2523 	frame_dma_attrs = mpt->m_msg_dma_attr;
2524 	frame_dma_attrs.dma_attr_align = 16;
2525 	frame_dma_attrs.dma_attr_sgllen = 1;
2526 
2527 	/*
2528 	 * allocate the reply post queue
2529 	 */
2530 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2531 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_post_queue_hdl) != DDI_SUCCESS) {
2532 		mptsas_log(mpt, CE_WARN,
2533 		    "Unable to allocate dma handle.");
2534 		return (DDI_FAILURE);
2535 	}
2536 
2537 	if (ddi_dma_mem_alloc(mpt->m_dma_post_queue_hdl,
2538 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2539 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_post_queue_hdl)
2540 	    != DDI_SUCCESS) {
2541 		ddi_dma_free_handle(&mpt->m_dma_post_queue_hdl);
2542 		mpt->m_dma_post_queue_hdl = NULL;
2543 		mptsas_log(mpt, CE_WARN,
2544 		    "Unable to allocate post queue.");
2545 		return (DDI_FAILURE);
2546 	}
2547 
2548 	if (ddi_dma_addr_bind_handle(mpt->m_dma_post_queue_hdl, NULL,
2549 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2550 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2551 		(void) ddi_dma_mem_free(&mpt->m_acc_post_queue_hdl);
2552 		ddi_dma_free_handle(&mpt->m_dma_post_queue_hdl);
2553 		mpt->m_dma_post_queue_hdl = NULL;
2554 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2555 		return (DDI_FAILURE);
2556 	}
2557 
2558 	/*
2559 	 * Store the reply descriptor post queue memory address.  This chip
2560 	 * uses this address to write to the reply descriptor post queue.  The
2561 	 * second address is the address mpt uses to manage the queue.
2562 	 */
2563 	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2564 	mpt->m_post_queue = memp;
2565 
2566 	/*
2567 	 * Clear the reply post queue memory.
2568 	 */
2569 	bzero(mpt->m_post_queue, alloc_len);
2570 
2571 	return (DDI_SUCCESS);
2572 }
2573 
2574 static int
2575 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2576 {
2577 	mptsas_cache_frames_t	*frames = NULL;
2578 	if (cmd->cmd_extra_frames == NULL) {
2579 		frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2580 		if (frames == NULL) {
2581 			return (DDI_FAILURE);
2582 		}
2583 		cmd->cmd_extra_frames = frames;
2584 	}
2585 	return (DDI_SUCCESS);
2586 }
2587 
2588 static void
2589 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2590 {
2591 	if (cmd->cmd_extra_frames) {
2592 		kmem_cache_free(mpt->m_cache_frames,
2593 		    (void *)cmd->cmd_extra_frames);
2594 		cmd->cmd_extra_frames = NULL;
2595 	}
2596 }
2597 
/*
 * Release the device register mapping (m_datap) established during
 * configuration setup.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2604 
2605 static void
2606 mptsas_hba_fini(mptsas_t *mpt)
2607 {
2608 	NDBG0(("mptsas_hba_fini"));
2609 
2610 	/*
2611 	 * Disable any bus mastering ability (i.e: DMA) prior to freeing any
2612 	 * allocated DMA resources.
2613 	 */
2614 	if (mpt->m_config_handle != NULL)
2615 		mptsas_disable_bus_master(mpt);
2616 
2617 	/*
2618 	 * Free up any allocated memory
2619 	 */
2620 	if (mpt->m_dma_req_frame_hdl != NULL) {
2621 		(void) ddi_dma_unbind_handle(mpt->m_dma_req_frame_hdl);
2622 		ddi_dma_mem_free(&mpt->m_acc_req_frame_hdl);
2623 		ddi_dma_free_handle(&mpt->m_dma_req_frame_hdl);
2624 		mpt->m_dma_req_frame_hdl = NULL;
2625 	}
2626 
2627 	if (mpt->m_dma_reply_frame_hdl != NULL) {
2628 		(void) ddi_dma_unbind_handle(mpt->m_dma_reply_frame_hdl);
2629 		ddi_dma_mem_free(&mpt->m_acc_reply_frame_hdl);
2630 		ddi_dma_free_handle(&mpt->m_dma_reply_frame_hdl);
2631 		mpt->m_dma_reply_frame_hdl = NULL;
2632 	}
2633 
2634 	if (mpt->m_dma_free_queue_hdl != NULL) {
2635 		(void) ddi_dma_unbind_handle(mpt->m_dma_free_queue_hdl);
2636 		ddi_dma_mem_free(&mpt->m_acc_free_queue_hdl);
2637 		ddi_dma_free_handle(&mpt->m_dma_free_queue_hdl);
2638 		mpt->m_dma_free_queue_hdl = NULL;
2639 	}
2640 
2641 	if (mpt->m_dma_post_queue_hdl != NULL) {
2642 		(void) ddi_dma_unbind_handle(mpt->m_dma_post_queue_hdl);
2643 		ddi_dma_mem_free(&mpt->m_acc_post_queue_hdl);
2644 		ddi_dma_free_handle(&mpt->m_dma_post_queue_hdl);
2645 		mpt->m_dma_post_queue_hdl = NULL;
2646 	}
2647 
2648 	if (mpt->m_replyh_args != NULL) {
2649 		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2650 		    * mpt->m_max_replies);
2651 	}
2652 }
2653 
2654 static int
2655 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2656 {
2657 	int		lun = 0;
2658 	char		*sas_wwn = NULL;
2659 	int		phynum = -1;
2660 	int		reallen = 0;
2661 
2662 	/* Get the target num */
2663 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2664 	    LUN_PROP, 0);
2665 
2666 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2667 	    "target-port", &sas_wwn) == DDI_PROP_SUCCESS) {
2668 		/*
2669 		 * Stick in the address of the form "wWWN,LUN"
2670 		 */
2671 		reallen = snprintf(name, len, "w%s,%x", sas_wwn, lun);
2672 		ddi_prop_free(sas_wwn);
2673 	} else if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2674 	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2675 		/*
2676 		 * Stick in the address of form "pPHY,LUN"
2677 		 */
2678 		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2679 	} else {
2680 		return (DDI_FAILURE);
2681 	}
2682 
2683 	ASSERT(reallen < len);
2684 	if (reallen >= len) {
2685 		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2686 		    "length too small, it needs to be %d bytes", reallen + 1);
2687 	}
2688 	return (DDI_SUCCESS);
2689 }
2690 
2691 /*
2692  * tran_tgt_init(9E) - target device instance initialization
2693  */
2694 static int
2695 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2696     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2697 {
2698 #ifndef __lock_lint
2699 	_NOTE(ARGUNUSED(hba_tran))
2700 #endif
2701 
2702 	/*
2703 	 * At this point, the scsi_device structure already exists
2704 	 * and has been initialized.
2705 	 *
2706 	 * Use this function to allocate target-private data structures,
2707 	 * if needed by this HBA.  Add revised flow-control and queue
2708 	 * properties for child here, if desired and if you can tell they
2709 	 * support tagged queueing by now.
2710 	 */
2711 	mptsas_t		*mpt;
2712 	int			lun = sd->sd_address.a_lun;
2713 	mdi_pathinfo_t		*pip = NULL;
2714 	mptsas_tgt_private_t	*tgt_private = NULL;
2715 	mptsas_target_t		*ptgt = NULL;
2716 	char			*psas_wwn = NULL;
2717 	int			phymask = 0;
2718 	uint64_t		sas_wwn = 0;
2719 	mpt = SDEV2MPT(sd);
2720 
2721 	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
2722 
2723 	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
2724 	    (void *)hba_dip, (void *)tgt_dip, lun));
2725 
2726 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
2727 		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
2728 		ddi_set_name_addr(tgt_dip, NULL);
2729 		return (DDI_FAILURE);
2730 	}
2731 	/*
2732 	 * phymask is 0 means the virtual port for RAID
2733 	 */
2734 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
2735 	    "phymask", 0);
2736 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2737 		if ((pip = (void *)(sd->sd_private)) == NULL) {
2738 			/*
2739 			 * Very bad news if this occurs. Somehow scsi_vhci has
2740 			 * lost the pathinfo node for this target.
2741 			 */
2742 			return (DDI_NOT_WELL_FORMED);
2743 		}
2744 
2745 		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
2746 		    DDI_PROP_SUCCESS) {
2747 			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
2748 			return (DDI_FAILURE);
2749 		}
2750 
2751 		if (mdi_prop_lookup_string(pip, "target-port", &psas_wwn) ==
2752 		    MDI_SUCCESS) {
2753 			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2754 				sas_wwn = 0;
2755 			}
2756 			(void) mdi_prop_free(psas_wwn);
2757 		}
2758 	} else {
2759 		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
2760 		    DDI_PROP_DONTPASS, LUN_PROP, 0);
2761 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
2762 		    DDI_PROP_DONTPASS, "target-port", &psas_wwn) ==
2763 		    DDI_PROP_SUCCESS) {
2764 			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2765 				sas_wwn = 0;
2766 			}
2767 			ddi_prop_free(psas_wwn);
2768 		} else {
2769 			sas_wwn = 0;
2770 		}
2771 	}
2772 	ASSERT((sas_wwn != 0) || (phymask != 0));
2773 	mutex_enter(&mpt->m_mutex);
2774 	ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
2775 	mutex_exit(&mpt->m_mutex);
2776 	if (ptgt == NULL) {
2777 		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
2778 		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
2779 		    sas_wwn);
2780 		return (DDI_FAILURE);
2781 	}
2782 	if (hba_tran->tran_tgt_private == NULL) {
2783 		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
2784 		    KM_SLEEP);
2785 		tgt_private->t_lun = lun;
2786 		tgt_private->t_private = ptgt;
2787 		hba_tran->tran_tgt_private = tgt_private;
2788 	}
2789 
2790 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2791 		return (DDI_SUCCESS);
2792 	}
2793 	mutex_enter(&mpt->m_mutex);
2794 
2795 	if (ptgt->m_deviceinfo &
2796 	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
2797 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
2798 		uchar_t *inq89 = NULL;
2799 		int inq89_len = 0x238;
2800 		int reallen = 0;
2801 		int rval = 0;
2802 		struct sata_id *sid = NULL;
2803 		char model[SATA_ID_MODEL_LEN + 1];
2804 		char fw[SATA_ID_FW_LEN + 1];
2805 		char *vid, *pid;
2806 		int i;
2807 
2808 		mutex_exit(&mpt->m_mutex);
2809 		/*
2810 		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
2811 		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
2812 		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
2813 		 */
2814 		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
2815 		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
2816 		    inq89, inq89_len, &reallen, 1);
2817 
2818 		if (rval != 0) {
2819 			if (inq89 != NULL) {
2820 				kmem_free(inq89, inq89_len);
2821 			}
2822 
2823 			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
2824 			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
2825 			return (DDI_SUCCESS);
2826 		}
2827 		sid = (void *)(&inq89[60]);
2828 
2829 		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
2830 		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
2831 
2832 		model[SATA_ID_MODEL_LEN] = 0;
2833 		fw[SATA_ID_FW_LEN] = 0;
2834 
2835 		/*
2836 		 * split model into into vid/pid
2837 		 */
2838 		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
2839 			if ((*pid == ' ') || (*pid == '\t'))
2840 				break;
2841 		if (i < SATA_ID_MODEL_LEN) {
2842 			vid = model;
2843 			/*
2844 			 * terminate vid, establish pid
2845 			 */
2846 			*pid++ = 0;
2847 		} else {
2848 			/*
2849 			 * vid will stay "ATA     ", the rule is same
2850 			 * as sata framework implementation.
2851 			 */
2852 			vid = NULL;
2853 			/*
2854 			 * model is all pid
2855 			 */
2856 			pid = model;
2857 		}
2858 
2859 		/*
2860 		 * override SCSA "inquiry-*" properties
2861 		 */
2862 		if (vid)
2863 			(void) scsi_hba_prop_update_inqstring(sd,
2864 			    INQUIRY_VENDOR_ID, vid, strlen(vid));
2865 		if (pid)
2866 			(void) scsi_hba_prop_update_inqstring(sd,
2867 			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
2868 		(void) scsi_hba_prop_update_inqstring(sd,
2869 		    INQUIRY_REVISION_ID, fw, strlen(fw));
2870 
2871 		if (inq89 != NULL) {
2872 			kmem_free(inq89, inq89_len);
2873 		}
2874 	} else {
2875 		mutex_exit(&mpt->m_mutex);
2876 	}
2877 
2878 	return (DDI_SUCCESS);
2879 }
2880 /*
2881  * tran_tgt_free(9E) - target device instance deallocation
2882  */
2883 static void
2884 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2885     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2886 {
2887 #ifndef __lock_lint
2888 	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
2889 #endif
2890 
2891 	mptsas_tgt_private_t	*tgt_private = hba_tran->tran_tgt_private;
2892 
2893 	if (tgt_private != NULL) {
2894 		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
2895 		hba_tran->tran_tgt_private = NULL;
2896 	}
2897 }
2898 
2899 /*
2900  * scsi_pkt handling
2901  *
2902  * Visible to the external world via the transport structure.
2903  */
2904 
2905 /*
2906  * Notes:
2907  *	- transport the command to the addressed SCSI target/lun device
2908  *	- normal operation is to schedule the command to be transported,
2909  *	  and return TRAN_ACCEPT if this is successful.
2910  *	- if NO_INTR, tran_start must poll device for command completion
2911  */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by another thread, don't spin to wait
	 * for it.  Instead, queue the cmd and the next time the instance lock
	 * is not held, accept all the queued cmds.  An extra tx_waitq is
	 * introduced to protect the queue.
	 *
	 * A polled cmd will not be queued; it is accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq.  An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst is processed.
	 *
	 * We enable this feature only when the helper threads are enabled,
	 * at which point we think the loads are heavy.
	 *
	 * The per instance mutex m_tx_waitq_mutex is introduced to protect
	 * m_tx_waitqtail, m_tx_waitq and m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			/* Got the instance lock without blocking. */
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* Polled cmds are never queued; block for the lock. */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/*
				 * Another thread is already draining; just
				 * append this cmd to the tail of tx_waitq.
				 */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3054 
3055 /*
3056  * Accept all the queued cmds(if any) before accept the current one.
3057  */
3058 static int
3059 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3060 {
3061 	int rval;
3062 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3063 
3064 	ASSERT(mutex_owned(&mpt->m_mutex));
3065 	/*
3066 	 * The call to mptsas_accept_tx_waitq() must always be performed
3067 	 * because that is where mpt->m_tx_draining is cleared.
3068 	 */
3069 	mutex_enter(&mpt->m_tx_waitq_mutex);
3070 	mptsas_accept_tx_waitq(mpt);
3071 	mutex_exit(&mpt->m_tx_waitq_mutex);
3072 	/*
3073 	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3074 	 * in this case, m_mutex is acquired.
3075 	 */
3076 	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3077 		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3078 			/*
3079 			 * The command should be allowed to retry by returning
3080 			 * TRAN_BUSY to stall the I/O's which come from
3081 			 * scsi_vhci since the device/path is in unstable state
3082 			 * now.
3083 			 */
3084 			return (TRAN_BUSY);
3085 		} else {
3086 			/*
3087 			 * The device is offline, just fail the command by
3088 			 * return TRAN_FATAL_ERROR.
3089 			 */
3090 			return (TRAN_FATAL_ERROR);
3091 		}
3092 	}
3093 	rval = mptsas_accept_pkt(mpt, cmd);
3094 
3095 	return (rval);
3096 }
3097 
/*
 * Accept a prepared cmd for transport: either start it immediately if a
 * request slot is available and throttling permits, or queue it on the
 * wait queue.  Caller must hold m_mutex.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Cmds resubmitted internally may not have gone through tran_start. */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If the device handle has already been invalidated, just fail the
	 * command.  In theory a scsi_vhci client cannot send down a command
	 * with an invalid devhdl, since the devhdl is invalidated only after
	 * the path goes offline and the target driver is not supposed to
	 * select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* tx_waitq cmds complete via the done queue. */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3183 
/*
 * Claim a free request slot (SMID) for the command and account for it.
 * Returns TRUE when a slot was assigned, FALSE when no slot is free.
 * Caller must hold m_mutex.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t	*slots;
	int		slot;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	slots = mpt->m_active;

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));

	/*
	 * m_tags is equivalent to the SMID when sending requests.  Since the
	 * SMID cannot be 0, start out at one if rolling over past the size
	 * of the request queue depth.  Also, don't use the last SMID, which is
	 * reserved for TM requests.
	 */
	slot = (slots->m_tags)++;
	if (slots->m_tags > slots->m_n_slots) {
		slots->m_tags = 1;
	}

alloc_tag:
	/* Validate tag, should never fail. */
	if (slots->m_slot[slot] == NULL) {
		/*
		 * Make sure SMID is not using reserved value of 0
		 * and the TM request slot.
		 */
		ASSERT((slot > 0) && (slot <= slots->m_n_slots));
		cmd->cmd_slot = slot;
		slots->m_slot[slot] = cmd;
		mpt->m_ncmds++;

		/*
		 * only increment per target ncmds if this is not a
		 * command that has no target associated with it (i.e. an
		 * event acknowledgment)
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
			ptgt->m_t_ncmds++;
		}
		cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;

		return (TRUE);
	} else {
		int i;

		/*
		 * If slot in use, scan until a free one is found. Don't use 0
		 * or final slot, which is reserved for TM requests.
		 */
		for (i = 0; i < slots->m_n_slots; i++) {
			slot = slots->m_tags;
			if (++(slots->m_tags) > slots->m_n_slots) {
				slots->m_tags = 1;
			}
			if (slots->m_slot[slot] == NULL) {
				NDBG22(("found free slot %d", slot));
				goto alloc_tag;
			}
		}
	}
	return (FALSE);
}
3253 
3254 /*
3255  * prepare the pkt:
3256  * the pkt may have been resubmitted or just reused so
3257  * initialize some fields and do some checks.
3258  */
3259 static int
3260 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3261 {
3262 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
3263 
3264 	NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3265 
3266 	/*
3267 	 * Reinitialize some fields that need it; the packet may
3268 	 * have been resubmitted
3269 	 */
3270 	pkt->pkt_reason = CMD_CMPLT;
3271 	pkt->pkt_state = 0;
3272 	pkt->pkt_statistics = 0;
3273 	pkt->pkt_resid = 0;
3274 	cmd->cmd_age = 0;
3275 	cmd->cmd_pkt_flags = pkt->pkt_flags;
3276 
3277 	/*
3278 	 * zero status byte.
3279 	 */
3280 	*(pkt->pkt_scbp) = 0;
3281 
3282 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
3283 		pkt->pkt_resid = cmd->cmd_dmacount;
3284 
3285 		/*
3286 		 * consistent packets need to be sync'ed first
3287 		 * (only for data going out)
3288 		 */
3289 		if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3290 		    (cmd->cmd_flags & CFLAG_DMASEND)) {
3291 			(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3292 			    DDI_DMA_SYNC_FORDEV);
3293 		}
3294 	}
3295 
3296 	cmd->cmd_flags =
3297 	    (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3298 	    CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3299 
3300 	return (TRAN_ACCEPT);
3301 }
3302 
3303 /*
3304  * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3305  *
3306  * One of three possibilities:
3307  *	- allocate scsi_pkt
3308  *	- allocate scsi_pkt and DMA resources
3309  *	- allocate DMA resources to an already-allocated pkt
3310  */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t		*cmd, *new_cmd;
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			failure = 1;
	uint_t			oldcookiec;
	mptsas_target_t		*ptgt = NULL;
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			kf;

	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	/* Resolve the target from the per-target transport private data. */
	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t	save_dma_handle;
		ddi_dma_handle_t	save_arq_dma_handle;
		struct buf		*save_arq_bp;
		ddi_dma_cookie_t	save_arqcookie;

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);

		if (cmd) {
			/*
			 * Preserve the constructor-allocated DMA resources
			 * across the bzero of the cmd + embedded pkt.
			 */
			save_dma_handle = cmd->cmd_dmahandle;
			save_arq_dma_handle = cmd->cmd_arqhandle;
			save_arq_bp = cmd->cmd_arq_buf;
			save_arqcookie = cmd->cmd_arqcookie;
			bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
			cmd->cmd_dmahandle = save_dma_handle;
			cmd->cmd_arqhandle = save_arq_dma_handle;
			cmd->cmd_arq_buf = save_arq_bp;
			cmd->cmd_arqcookie = save_arqcookie;

			/* The scsi_pkt lives directly after the cmd. */
			pkt = (void *)((uchar_t *)cmd +
			    sizeof (struct mptsas_cmd));
			pkt->pkt_ha_private = (opaque_t)cmd;
			pkt->pkt_address = *ap;
			pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
			pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
			pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
			cmd->cmd_pkt = (struct scsi_pkt *)pkt;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen = statuslen;
			cmd->cmd_rqslen = SENSE_LENGTH;
			cmd->cmd_tgt_addr = ptgt;
			failure = 0;
		}

		/*
		 * Requests larger than the inline cdb/private/status areas
		 * need externally allocated space.
		 */
		if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (failure == 0) {
				/*
				 * if extern alloc fails, all will be
				 * deallocated, including cmd
				 */
				failure = mptsas_pkt_alloc_extern(mpt, cmd,
				    cmdlen, tgtlen, statuslen, kf);
			}
			if (failure) {
				/*
				 * if extern allocation fails, it will
				 * deallocate the new pkt as well
				 */
				return (NULL);
			}
		}
		new_cmd = cmd;

	} else {
		cmd = PKT2CMD(pkt);
		new_cmd = NULL;
	}


	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we haven't gone past the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation.  This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* Partial mapping: position to the first window. */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* Map bind failure onto an appropriate errno. */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */

		if (oldcookiec != cmd->cmd_cookiec) {
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
		/*
		 * if we have an ENOMEM condition we need to behave
		 * the same way as the rest of this routine
		 */

				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used).  totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
	}
	return (pkt);
}
3653 
3654 /*
3655  * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3656  *
3657  * Notes:
3658  *	- also frees DMA resources if allocated
3659  *	- implicit DMA synchonization
3660  */
static void
mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
	    ap->a_target, (void *)pkt));

	/* Unbind any DMA resources still attached to the data transfer. */
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
		cmd->cmd_flags &= ~CFLAG_DMAVALID;
	}

	/* Free the S/G list allocated in mptsas_scsi_init_pkt(). */
	if (cmd->cmd_sg) {
		kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
		cmd->cmd_sg = NULL;
	}

	mptsas_free_extra_sgl_frame(mpt, cmd);

	/*
	 * With no externally allocated cdb/status/private areas the cmd
	 * goes straight back to the kmem cache; otherwise the external
	 * areas must be torn down first.
	 */
	if ((cmd->cmd_flags &
	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
	    CFLAG_SCBEXTERN)) == 0) {
		cmd->cmd_flags = CFLAG_FREE;
		kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
	} else {
		mptsas_pkt_destroy_extern(mpt, cmd);
	}
}
3691 
3692 /*
3693  * kmem cache constructor and destructor:
3694  * When constructing, we bzero the cmd and allocate the dma handle
3695  * When destructing, just free the dma handle
3696  */
3697 static int
3698 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3699 {
3700 	mptsas_cmd_t		*cmd = buf;
3701 	mptsas_t		*mpt  = cdrarg;
3702 	struct scsi_address	ap;
3703 	uint_t			cookiec;
3704 	ddi_dma_attr_t		arq_dma_attr;
3705 	int			(*callback)(caddr_t);
3706 
3707 	callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3708 
3709 	NDBG4(("mptsas_kmem_cache_constructor"));
3710 
3711 	ap.a_hba_tran = mpt->m_tran;
3712 	ap.a_target = 0;
3713 	ap.a_lun = 0;
3714 
3715 	/*
3716 	 * allocate a dma handle
3717 	 */
3718 	if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3719 	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3720 		cmd->cmd_dmahandle = NULL;
3721 		return (-1);
3722 	}
3723 
3724 	cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3725 	    SENSE_LENGTH, B_READ, callback, NULL);
3726 	if (cmd->cmd_arq_buf == NULL) {
3727 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
3728 		cmd->cmd_dmahandle = NULL;
3729 		return (-1);
3730 	}
3731 
3732 	/*
3733 	 * allocate a arq handle
3734 	 */
3735 	arq_dma_attr = mpt->m_msg_dma_attr;
3736 	arq_dma_attr.dma_attr_sgllen = 1;
3737 	if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3738 	    NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3739 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
3740 		scsi_free_consistent_buf(cmd->cmd_arq_buf);
3741 		cmd->cmd_dmahandle = NULL;
3742 		cmd->cmd_arqhandle = NULL;
3743 		return (-1);
3744 	}
3745 
3746 	if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3747 	    cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3748 	    callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3749 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
3750 		ddi_dma_free_handle(&cmd->cmd_arqhandle);
3751 		scsi_free_consistent_buf(cmd->cmd_arq_buf);
3752 		cmd->cmd_dmahandle = NULL;
3753 		cmd->cmd_arqhandle = NULL;
3754 		cmd->cmd_arq_buf = NULL;
3755 		return (-1);
3756 	}
3757 
3758 	return (0);
3759 }
3760 
static void
mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(cdrarg))
#endif
	mptsas_cmd_t	*cmd = buf;

	NDBG4(("mptsas_kmem_cache_destructor"));

	/*
	 * Release the constructor-allocated resources in reverse order:
	 * unbind/free the ARQ handle, free the ARQ buffer, then free the
	 * data DMA handle.  Each pointer is checked defensively and
	 * cleared after its resource is freed.
	 */
	if (cmd->cmd_arqhandle) {
		(void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
		ddi_dma_free_handle(&cmd->cmd_arqhandle);
		cmd->cmd_arqhandle = NULL;
	}
	if (cmd->cmd_arq_buf) {
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_arq_buf = NULL;
	}
	if (cmd->cmd_dmahandle) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		cmd->cmd_dmahandle = NULL;
	}
}
3785 
/*
 * kmem cache constructor for the extra SGL frame buffers: allocates and
 * binds one contiguous DMA region used to hold chained SGL frames.
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/* Frames must be 16-byte aligned and mapped as a single segment. */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/*
	 * (m_max_request_frames - 1) frames are allocated here; presumably
	 * the remaining frame comes from the main request pool — TODO
	 * confirm against mptsas_start_cmd's frame usage.
	 */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address.  This chip uses this
	 * address to dma to and from the driver.  The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}
3841 
3842 static void
3843 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3844 {
3845 #ifndef __lock_lint
3846 	_NOTE(ARGUNUSED(cdrarg))
3847 #endif
3848 	mptsas_cache_frames_t	*p = buf;
3849 	if (p->m_dma_hdl != NULL) {
3850 		(void) ddi_dma_unbind_handle(p->m_dma_hdl);
3851 		(void) ddi_dma_mem_free(&p->m_acc_hdl);
3852 		ddi_dma_free_handle(&p->m_dma_hdl);
3853 		p->m_phys_addr = NULL;
3854 		p->m_frames_addr = NULL;
3855 		p->m_dma_hdl = NULL;
3856 		p->m_acc_hdl = NULL;
3857 	}
3858 
3859 }
3860 
3861 /*
3862  * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
3863  * for non-standard length cdb, pkt_private, status areas
3864  * if allocation fails, then deallocate all external space and the pkt
3865  */
3866 /* ARGSUSED */
static int
mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
    int cmdlen, int tgtlen, int statuslen, int kf)
{
	caddr_t			cdbp, scbp, tgt;
	int			(*callback)(caddr_t) = (kf == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	struct scsi_address	ap;
	size_t			senselength;
	ddi_dma_attr_t		ext_arq_dma_attr;
	uint_t			cookiec;

	NDBG3(("mptsas_pkt_alloc_extern: "
	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));

	tgt = cdbp = scbp = NULL;
	cmd->cmd_scblen		= statuslen;
	cmd->cmd_privlen	= (uchar_t)tgtlen;

	/* External CDB area for commands longer than the inline cdb. */
	if (cmdlen > sizeof (cmd->cmd_cdb)) {
		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
		cmd->cmd_flags |= CFLAG_CDBEXTERN;
	}
	/* External pkt_private area. */
	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
		cmd->cmd_pkt->pkt_private = tgt;
	}
	/* External status area, plus a DMA-able sense buffer bound to it. */
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_SCBEXTERN;
		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;

		/* allocate sense data buf for DMA */

		senselength = statuslen - MPTSAS_GET_ITEM_OFF(
		    struct scsi_arq_status, sts_sensedata);
		cmd->cmd_rqslen = (uchar_t)senselength;

		ap.a_hba_tran = mpt->m_tran;
		ap.a_target = 0;
		ap.a_lun = 0;

		cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, senselength, B_READ,
		    callback, NULL);

		if (cmd->cmd_ext_arq_buf == NULL) {
			goto fail;
		}
		/*
		 * allocate an extern arq handle and bind the buf
		 */
		ext_arq_dma_attr = mpt->m_msg_dma_attr;
		ext_arq_dma_attr.dma_attr_sgllen = 1;
		if ((ddi_dma_alloc_handle(mpt->m_dip,
		    &ext_arq_dma_attr, callback,
		    NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
		    cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
		    callback, NULL, &cmd->cmd_ext_arqcookie,
		    &cookiec)
		    != DDI_SUCCESS) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
	}
	return (0);
fail:
	/* Frees everything allocated so far, including the cmd itself. */
	mptsas_pkt_destroy_extern(mpt, cmd);
	return (1);
}
3950 
3951 /*
3952  * deallocate external pkt space and deallocate the pkt
3953  */
3954 static void
3955 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
3956 {
3957 	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
3958 
3959 	if (cmd->cmd_flags & CFLAG_FREE) {
3960 		mptsas_log(mpt, CE_PANIC,
3961 		    "mptsas_pkt_destroy_extern: freeing free packet");
3962 		_NOTE(NOT_REACHED)
3963 		/* NOTREACHED */
3964 	}
3965 	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
3966 		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
3967 	}
3968 	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
3969 		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
3970 		if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
3971 			(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
3972 		}
3973 		if (cmd->cmd_ext_arqhandle) {
3974 			ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
3975 			cmd->cmd_ext_arqhandle = NULL;
3976 		}
3977 		if (cmd->cmd_ext_arq_buf)
3978 			scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
3979 	}
3980 	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
3981 		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
3982 	}
3983 	cmd->cmd_flags = CFLAG_FREE;
3984 	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3985 }
3986 
3987 /*
3988  * tran_sync_pkt(9E) - explicit DMA synchronization
3989  */
3990 /*ARGSUSED*/
3991 static void
3992 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3993 {
3994 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
3995 
3996 	NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
3997 	    ap->a_target, (void *)pkt));
3998 
3999 	if (cmd->cmd_dmahandle) {
4000 		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4001 		    (cmd->cmd_flags & CFLAG_DMASEND) ?
4002 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4003 	}
4004 }
4005 
4006 /*
4007  * tran_dmafree(9E) - deallocate DMA resources allocated for command
4008  */
4009 /*ARGSUSED*/
4010 static void
4011 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4012 {
4013 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4014 	mptsas_t	*mpt = ADDR2MPT(ap);
4015 
4016 	NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4017 	    ap->a_target, (void *)pkt));
4018 
4019 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
4020 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4021 		cmd->cmd_flags &= ~CFLAG_DMAVALID;
4022 	}
4023 
4024 	if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4025 		(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4026 		cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4027 	}
4028 
4029 	mptsas_free_extra_sgl_frame(mpt, cmd);
4030 }
4031 
4032 static void
4033 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4034 {
4035 	if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4036 	    (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4037 		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4038 		    DDI_DMA_SYNC_FORCPU);
4039 	}
4040 	(*pkt->pkt_comp)(pkt);
4041 }
4042 
/*
 * Build the scatter/gather element (SGE) list for a SCSI IO request
 * frame from the command's DMA cookies, and merge the transfer
 * direction into *control.  If every cookie fits into the main request
 * frame the SGL is built in place; otherwise a chain element in the
 * main frame links to extra frames in the command's chain buffer
 * (cmd->cmd_extra_frames).
 */
static void
mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
	pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	uint_t			cookiec;
	mptti_t			*dmap;
	uint32_t		flags;
	pMpi2SGESimple64_t	sge;
	pMpi2SGEChain64_t	sgechain;
	ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));

	/*
	 * Set read/write bit in control.
	 */
	if (cmd->cmd_flags & CFLAG_DMASEND) {
		*control |= MPI2_SCSIIO_CONTROL_WRITE;
	} else {
		*control |= MPI2_SCSIIO_CONTROL_READ;
	}

	ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);

	/*
	 * We have 2 cases here.  First where we can fit all the
	 * SG elements into the main frame, and the case
	 * where we can't.
	 * If we have more cookies than we can attach to a frame
	 * we will need to use a chain element to point
	 * a location of memory where the rest of the S/G
	 * elements reside.
	 */
	if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
		/* Simple case: every cookie fits in the main frame. */
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);
		while (cookiec--) {
			ddi_put32(acc_hdl,
			    &sge->Address.Low, dmap->addr.address64.Low);
			ddi_put32(acc_hdl,
			    &sge->Address.High, dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength,
			    dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)
			    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last cookie, we set the flags
			 * to indicate so
			 */
			if (cookiec == 0) {
				flags |=
				    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
				    | MPI2_SGE_FLAGS_END_OF_BUFFER
				    | MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	} else {
		/*
		 * Hereby we start to deal with multiple frames.
		 * The process is as follows:
		 * 1. Determine how many frames are needed for SGL element
		 *    storage; Note that all frames are stored in contiguous
		 *    memory space and in 64-bit DMA mode each element is
		 *    3 double-words (12 bytes) long.
		 * 2. Fill up the main frame. We need to do this separately
		 *    since it contains the SCSI IO request header and needs
		 *    dedicated processing. Note that the last 4 double-words
		 *    of the SCSI IO header is for SGL element storage
		 *    (MPI2_SGE_IO_UNION).
		 * 3. Fill the chain element in the main frame, so the DMA
		 *    engine can use the following frames.
		 * 4. Enter a loop to fill the remaining frames. Note that the
		 *    last frame contains no chain element.  The remaining
		 *    frames go into the mpt SGL buffer allocated on the fly,
		 *    not immediately following the main message frame, as in
		 *    Gen1.
		 * Some restrictions:
		 * 1. For 64-bit DMA, the simple element and chain element
		 *    are both of 3 double-words (12 bytes) in size, even
		 *    though all frames are stored in the first 4G of mem
		 *    range and the higher 32-bits of the address are always 0.
		 * 2. On some controllers (like the 1064/1068), a frame can
		 *    hold SGL elements with the last 1 or 2 double-words
		 *    (4 or 8 bytes) un-used. On these controllers, we should
		 *    recognize that there's not enough room for another SGL
		 *    element and move the sge pointer to the next frame.
		 */
		int		i, j, k, l, frames, sgemax;
		int		temp;
		uint8_t		chainflags;
		uint16_t	chainlength;
		mptsas_cache_frames_t *p;

		/*
		 * Sgemax is the number of SGE's that will fit
		 * each extra frame and frames is total
		 * number of frames we'll need.  1 sge entry per
		 * frame is reseverd for the chain element thus the -1 below.
		 */
		sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
		    - 1);
		temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

		/*
		 * A little check to see if we need to round up the number
		 * of frames we need
		 */
		if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
		    sgemax) > 1) {
			frames = (temp + 1);
		} else {
			frames = temp;
		}
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);

		/*
		 * First fill in the main frame
		 */
		for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
			ddi_put32(acc_hdl, &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(acc_hdl, &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last SGE of this frame
			 * we set the end of list flag
			 */
			if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}

		/*
		 * Fill in the chain element in the main frame.
		 * About calculation on ChainOffset:
		 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
		 *    in the end reserved for SGL element storage
		 *    (MPI2_SGE_IO_UNION); we should count it in our
		 *    calculation.  See its definition in the header file.
		 * 2. Constant j is the counter of the current SGL element
		 *    that will be processed, and (j - 1) is the number of
		 *    SGL elements that have been processed (stored in the
		 *    main frame).
		 * 3. ChainOffset value should be in units of double-words (4
		 *    bytes) so the last value should be divided by 4.
		 */
		ddi_put8(acc_hdl, &frame->ChainOffset,
		    (sizeof (MPI2_SCSI_IO_REQUEST) -
		    sizeof (MPI2_SGE_IO_UNION) +
		    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
		sgechain = (pMpi2SGEChain64_t)sge;
		chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
		ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

		/*
		 * The size of the next frame is the accurate size of space
		 * (in bytes) used to store the SGL elements. j is the counter
		 * of SGL elements. (j - 1) is the number of SGL elements that
		 * have been processed (stored in frames).
		 */
		if (frames >= 2) {
			chainlength = mpt->m_req_frame_size /
			    sizeof (MPI2_SGE_SIMPLE64) *
			    sizeof (MPI2_SGE_SIMPLE64);
		} else {
			chainlength = ((cookiec - (j - 1)) *
			    sizeof (MPI2_SGE_SIMPLE64));
		}

		/*
		 * NOTE(review): this path assumes the chain-frame buffer was
		 * pre-allocated for this command (cmd_extra_frames != NULL)
		 * whenever cookiec exceeds the main frame's SGE capacity —
		 * confirm callers guarantee this before transport.
		 */
		p = cmd->cmd_extra_frames;

		ddi_put16(acc_hdl, &sgechain->Length, chainlength);
		ddi_put32(acc_hdl, &sgechain->Address.Low,
		    p->m_phys_addr);
		/* SGL is allocated in the first 4G mem range */
		ddi_put32(acc_hdl, &sgechain->Address.High, 0);

		/*
		 * If there are more than 2 frames left we have to
		 * fill in the next chain offset to the location of
		 * the chain element in the next frame.
		 * sgemax is the number of simple elements in an extra
		 * frame. Note that the value NextChainOffset should be
		 * in double-words (4 bytes).
		 */
		if (frames >= 2) {
			ddi_put8(acc_hdl, &sgechain->NextChainOffset,
			    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
		} else {
			ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
		}

		/*
		 * Jump to next frame;
		 * Starting here, chain buffers go into the per command SGL.
		 * This buffer is allocated when chain buffers are needed.
		 */
		sge = (pMpi2SGESimple64_t)p->m_frames_addr;
		i = cookiec;

		/*
		 * Start filling in frames with SGE's.  If we
		 * reach the end of frame and still have SGE's
		 * to fill we need to add a chain element and
		 * use another frame.  j will be our counter
		 * for what cookie we are at and i will be
		 * the total cookiec. k is the current frame
		 */
		for (k = 1; k <= frames; k++) {
			for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

				/*
				 * If we have reached the end of frame
				 * and we have more SGE's to fill in
				 * we have to fill the final entry
				 * with a chain element and then
				 * continue to the next frame
				 */
				if ((l == (sgemax + 1)) && (k != frames)) {
					sgechain = (pMpi2SGEChain64_t)sge;
					/*
					 * The chain element does not consume
					 * a cookie, so undo this iteration's
					 * j++ before continuing.
					 */
					j--;
					chainflags = (
					    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
					    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
					    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
					ddi_put8(p->m_acc_hdl,
					    &sgechain->Flags, chainflags);
					/*
					 * k is the frame counter and (k + 1)
					 * is the number of the next frame.
					 * Note that frames are in contiguous
					 * memory space.
					 */
					ddi_put32(p->m_acc_hdl,
					    &sgechain->Address.Low,
					    (p->m_phys_addr +
					    (mpt->m_req_frame_size * k)));
					ddi_put32(p->m_acc_hdl,
					    &sgechain->Address.High, 0);

					/*
					 * If there are more than 2 frames left
					 * we have to next chain offset to
					 * the location of the chain element
					 * in the next frame and fill in the
					 * length of the next chain
					 */
					if ((frames - k) >= 2) {
						ddi_put8(p->m_acc_hdl,
						    &sgechain->NextChainOffset,
						    (sgemax *
						    sizeof (MPI2_SGE_SIMPLE64))
						    >> 2);
						ddi_put16(p->m_acc_hdl,
						    &sgechain->Length,
						    mpt->m_req_frame_size /
						    sizeof (MPI2_SGE_SIMPLE64) *
						    sizeof (MPI2_SGE_SIMPLE64));
					} else {
						/*
						 * This is the last frame. Set
						 * the NextChainOffset to 0 and
						 * Length is the total size of
						 * all remaining simple elements
						 */
						ddi_put8(p->m_acc_hdl,
						    &sgechain->NextChainOffset,
						    0);
						ddi_put16(p->m_acc_hdl,
						    &sgechain->Length,
						    (cookiec - j) *
						    sizeof (MPI2_SGE_SIMPLE64));
					}

					/* Jump to the next frame */
					sge = (pMpi2SGESimple64_t)
					    ((char *)p->m_frames_addr +
					    (int)mpt->m_req_frame_size * k);

					continue;
				}

				ddi_put32(p->m_acc_hdl,
				    &sge->Address.Low,
				    dmap->addr.address64.Low);
				ddi_put32(p->m_acc_hdl,
				    &sge->Address.High,
				    dmap->addr.address64.High);
				ddi_put32(p->m_acc_hdl,
				    &sge->FlagsLength, dmap->count);
				flags = ddi_get32(p->m_acc_hdl,
				    &sge->FlagsLength);
				flags |= ((uint32_t)(
				    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
				    MPI2_SGE_FLAGS_SHIFT);

				/*
				 * If we are at the end of the frame and
				 * there is another frame to fill in
				 * we set the last simple element as last
				 * element
				 */
				if ((l == sgemax) && (k != frames)) {
					flags |= ((uint32_t)
					    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
					    MPI2_SGE_FLAGS_SHIFT);
				}

				/*
				 * If this is the final cookie we
				 * indicate it by setting the flags
				 */
				if (j == i) {
					flags |= ((uint32_t)
					    (MPI2_SGE_FLAGS_LAST_ELEMENT |
					    MPI2_SGE_FLAGS_END_OF_BUFFER |
					    MPI2_SGE_FLAGS_END_OF_LIST) <<
					    MPI2_SGE_FLAGS_SHIFT);
				}
				if (cmd->cmd_flags & CFLAG_DMASEND) {
					flags |=
					    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
					    MPI2_SGE_FLAGS_SHIFT);
				} else {
					flags |=
					    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
					    MPI2_SGE_FLAGS_SHIFT);
				}
				ddi_put32(p->m_acc_hdl,
				    &sge->FlagsLength, flags);
				dmap++;
				sge++;
			}
		}

		/*
		 * Sync DMA with the chain buffers that were just created
		 */
		(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	}
}
4432 
4433 /*
4434  * Interrupt handling
4435  * Utility routine.  Poll for status of a command sent to HBA
4436  * without interrupts (a FLAG_NOINTR command).
4437  */
4438 int
4439 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4440 {
4441 	int	rval = TRUE;
4442 
4443 	NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4444 
4445 	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4446 		mptsas_restart_hba(mpt);
4447 	}
4448 
4449 	/*
4450 	 * Wait, using drv_usecwait(), long enough for the command to
4451 	 * reasonably return from the target if the target isn't
4452 	 * "dead".  A polled command may well be sent from scsi_poll, and
4453 	 * there are retries built in to scsi_poll if the transport
4454 	 * accepted the packet (TRAN_ACCEPT).  scsi_poll waits 1 second
4455 	 * and retries the transport up to scsi_poll_busycnt times
4456 	 * (currently 60) if
4457 	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4458 	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4459 	 *
4460 	 * limit the waiting to avoid a hang in the event that the
4461 	 * cmd never gets started but we are still receiving interrupts
4462 	 */
4463 	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4464 		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4465 			NDBG5(("mptsas_poll: command incomplete"));
4466 			rval = FALSE;
4467 			break;
4468 		}
4469 	}
4470 
4471 	if (rval == FALSE) {
4472 
4473 		/*
4474 		 * this isn't supposed to happen, the hba must be wedged
4475 		 * Mark this cmd as a timeout.
4476 		 */
4477 		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4478 		    (STAT_TIMEOUT|STAT_ABORTED));
4479 
4480 		if (poll_cmd->cmd_queued == FALSE) {
4481 
4482 			NDBG5(("mptsas_poll: not on waitq"));
4483 
4484 			poll_cmd->cmd_pkt->pkt_state |=
4485 			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4486 		} else {
4487 
4488 			/* find and remove it from the waitq */
4489 			NDBG5(("mptsas_poll: delete from waitq"));
4490 			mptsas_waitq_delete(mpt, poll_cmd);
4491 		}
4492 
4493 	}
4494 	mptsas_fma_check(mpt, poll_cmd);
4495 	NDBG5(("mptsas_poll: done"));
4496 	return (rval);
4497 }
4498 
4499 /*
4500  * Used for polling cmds and TM function
4501  */
4502 static int
4503 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4504 {
4505 	int				cnt;
4506 	uint32_t			reply_index;
4507 	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
4508 
4509 	NDBG5(("mptsas_wait_intr"));
4510 
4511 	/*
4512 	 * Keep polling for at least (polltime * 1000) seconds
4513 	 */
4514 	reply_index = mpt->m_post_index;
4515 	for (cnt = 0; cnt < polltime; cnt++) {
4516 		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
4517 		    MPTSAS_GET_NEXT_REPLY(mpt, reply_index);
4518 
4519 		if (ddi_get32(mpt->m_acc_post_queue_hdl,
4520 		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
4521 		    ddi_get32(mpt->m_acc_post_queue_hdl,
4522 		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
4523 			drv_usecwait(1000);
4524 			continue;
4525 		}
4526 		mpt->m_polled_intr = 1;
4527 		/*
4528 		 * The reply is valid, process it according to its
4529 		 * type.  Also, set a flag for updated the reply index
4530 		 * after they've all been processed.
4531 		 */
4532 		mptsas_process_intr(mpt, reply_desc_union);
4533 
4534 		if (++reply_index == mpt->m_post_queue_depth) {
4535 			reply_index = 0;
4536 		}
4537 		/*
4538 		 * Update the global reply index
4539 		 */
4540 		mpt->m_post_index = reply_index;
4541 		ddi_put32(mpt->m_datap,
4542 		    &mpt->m_reg->ReplyPostHostIndex, reply_index);
4543 
4544 		return (TRUE);
4545 
4546 	}
4547 	return (FALSE);
4548 }
4549 
/*
 * Handle a SCSI IO Success reply descriptor: the IO completed cleanly
 * with no reply frame, so look up the command by SMID and complete it.
 * Called with m_mutex held.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
	uint16_t				SMID;
	mptsas_slots_t				*slots = mpt->m_active;
	mptsas_cmd_t				*cmd = NULL;
	struct scsi_pkt				*pkt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_slots)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	/* Success path: mark full completion state on the packet. */
	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/*
	 * Passthrough commands are completed by waking their waiter;
	 * they stay in their slot until the waiter cleans up.
	 */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
4619 
4620 static void
4621 mptsas_handle_address_reply(mptsas_t *mpt,
4622     pMpi2ReplyDescriptorsUnion_t reply_desc)
4623 {
4624 	pMpi2AddressReplyDescriptor_t	address_reply;
4625 	pMPI2DefaultReply_t		reply;
4626 	uint32_t			reply_addr, reply_index;
4627 	uint16_t			SMID;
4628 	mptsas_slots_t			*slots = mpt->m_active;
4629 	mptsas_cmd_t			*cmd = NULL;
4630 	uint8_t				function;
4631 	m_replyh_arg_t			*args;
4632 	int				reply_frame_no;
4633 
4634 	ASSERT(mutex_owned(&mpt->m_mutex));
4635 
4636 	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
4637 	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
4638 	    &address_reply->ReplyFrameAddress);
4639 	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
4640 
4641 	/*
4642 	 * If reply frame is not in the proper range we should ignore this
4643 	 * message and exit the interrupt handler.
4644 	 */
4645 	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
4646 	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
4647 	    (mpt->m_reply_frame_size * mpt->m_free_queue_depth))) ||
4648 	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
4649 	    mpt->m_reply_frame_size != 0)) {
4650 		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
4651 		    "address 0x%x\n", reply_addr);
4652 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4653 		return;
4654 	}
4655 
4656 	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
4657 	    DDI_DMA_SYNC_FORCPU);
4658 	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
4659 	    mpt->m_reply_frame_dma_addr));
4660 	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
4661 
4662 	/*
4663 	 * don't get slot information and command for events since these values
4664 	 * don't exist
4665 	 */
4666 	if (function != MPI2_FUNCTION_EVENT_NOTIFICATION) {
4667 		/*
4668 		 * This could be a TM reply, which use the last allocated SMID,
4669 		 * so allow for that.
4670 		 */
4671 		if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
4672 			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
4673 			    "%d\n", SMID);
4674 			ddi_fm_service_impact(mpt->m_dip,
4675 			    DDI_SERVICE_UNAFFECTED);
4676 			return;
4677 		}
4678 
4679 		cmd = slots->m_slot[SMID];
4680 
4681 		/*
4682 		 * print warning and return if the slot is empty
4683 		 */
4684 		if (cmd == NULL) {
4685 			mptsas_log(mpt, CE_WARN, "?NULL command for address "
4686 			    "reply in slot %d", SMID);
4687 			return;
4688 		}
4689 		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
4690 		    (cmd->cmd_flags & CFLAG_CONFIG)) {
4691 			cmd->cmd_rfm = reply_addr;
4692 			cmd->cmd_flags |= CFLAG_FINISHED;
4693 			cv_broadcast(&mpt->m_passthru_cv);
4694 			cv_broadcast(&mpt->m_config_cv);
4695 			return;
4696 		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
4697 			mptsas_remove_cmd(mpt, cmd);
4698 		}
4699 		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
4700 	}
4701 	/*
4702 	 * Depending on the function, we need to handle
4703 	 * the reply frame (and cmd) differently.
4704 	 */
4705 	switch (function) {
4706 	case MPI2_FUNCTION_SCSI_IO_REQUEST:
4707 		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
4708 		break;
4709 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
4710 		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
4711 		    cmd);
4712 		break;
4713 	case MPI2_FUNCTION_FW_DOWNLOAD:
4714 		cmd->cmd_flags |= CFLAG_FINISHED;
4715 		cv_signal(&mpt->m_fw_cv);
4716 		break;
4717 	case MPI2_FUNCTION_EVENT_NOTIFICATION:
4718 		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
4719 		    mpt->m_reply_frame_size;
4720 		args = &mpt->m_replyh_args[reply_frame_no];
4721 		args->mpt = (void *)mpt;
4722 		args->rfm = reply_addr;
4723 
4724 		/*
4725 		 * Record the event if its type is enabled in
4726 		 * this mpt instance by ioctl.
4727 		 */
4728 		mptsas_record_event(args);
4729 
4730 		/*
4731 		 * Handle time critical events
4732 		 * NOT_RESPONDING/ADDED only now
4733 		 */
4734 		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
4735 			/*
4736 			 * Would not return main process,
4737 			 * just let taskq resolve ack action
4738 			 * and ack would be sent in taskq thread
4739 			 */
4740 			NDBG20(("send mptsas_handle_event_sync success"));
4741 		}
4742 		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
4743 		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
4744 			mptsas_log(mpt, CE_WARN, "No memory available"
4745 			"for dispatch taskq");
4746 			/*
4747 			 * Return the reply frame to the free queue.
4748 			 */
4749 			reply_index = mpt->m_free_index;
4750 			ddi_put32(mpt->m_acc_free_queue_hdl,
4751 			    &((uint32_t *)(void *)
4752 			    mpt->m_free_queue)[reply_index], reply_addr);
4753 			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
4754 			    DDI_DMA_SYNC_FORDEV);
4755 			if (++reply_index == mpt->m_free_queue_depth) {
4756 				reply_index = 0;
4757 			}
4758 			mpt->m_free_index = reply_index;
4759 
4760 			ddi_put32(mpt->m_datap,
4761 			    &mpt->m_reg->ReplyFreeHostIndex, reply_index);
4762 		}
4763 		return;
4764 	default:
4765 		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
4766 		break;
4767 	}
4768 
4769 	/*
4770 	 * Return the reply frame to the free queue.
4771 	 */
4772 	reply_index = mpt->m_free_index;
4773 	ddi_put32(mpt->m_acc_free_queue_hdl,
4774 	    &((uint32_t *)(void *)mpt->m_free_queue)[reply_index],
4775 	    reply_addr);
4776 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
4777 	    DDI_DMA_SYNC_FORDEV);
4778 	if (++reply_index == mpt->m_free_queue_depth) {
4779 		reply_index = 0;
4780 	}
4781 	mpt->m_free_index = reply_index;
4782 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, reply_index);
4783 
4784 	if (cmd->cmd_flags & CFLAG_FW_CMD)
4785 		return;
4786 
4787 	if (cmd->cmd_flags & CFLAG_RETRY) {
4788 		/*
4789 		 * The target returned QFULL or busy, do not add tihs
4790 		 * pkt to the doneq since the hba will retry
4791 		 * this cmd.
4792 		 *
4793 		 * The pkt has already been resubmitted in
4794 		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4795 		 * Remove this cmd_flag here.
4796 		 */
4797 		cmd->cmd_flags &= ~CFLAG_RETRY;
4798 	} else {
4799 		struct scsi_pkt *pkt = CMD2PKT(cmd);
4800 
4801 		ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
4802 		cmd->cmd_linkp = NULL;
4803 		cmd->cmd_flags |= CFLAG_FINISHED;
4804 		cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
4805 
4806 		if (pkt && pkt->pkt_comp) {
4807 			cmd->cmd_flags |= CFLAG_COMPLETED;
4808 			mutex_exit(&mpt->m_mutex);
4809 			mptsas_pkt_comp(pkt, cmd);
4810 			mutex_enter(&mpt->m_mutex);
4811 		}
4812 	}
4813 }
4814 
4815 static void
4816 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
4817     mptsas_cmd_t *cmd)
4818 {
4819 	uint8_t			scsi_status, scsi_state;
4820 	uint16_t		ioc_status;
4821 	uint32_t		xferred, sensecount, loginfo = 0;
4822 	struct scsi_pkt		*pkt;
4823 	struct scsi_arq_status	*arqstat;
4824 	struct buf		*bp;
4825 	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
4826 	uint8_t			*sensedata = NULL;
4827 
4828 	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
4829 	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
4830 		bp = cmd->cmd_ext_arq_buf;
4831 	} else {
4832 		bp = cmd->cmd_arq_buf;
4833 	}
4834 
4835 	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
4836 	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
4837 	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
4838 	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
4839 	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
4840 
4841 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
4842 		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
4843 		    &reply->IOCLogInfo);
4844 		mptsas_log(mpt, CE_NOTE,
4845 		    "?Log info 0x%x received for target %d.\n"
4846 		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
4847 		    loginfo, Tgt(cmd), scsi_status, ioc_status,
4848 		    scsi_state);
4849 	}
4850 
4851 	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
4852 	    scsi_status, ioc_status, scsi_state));
4853 
4854 	pkt = CMD2PKT(cmd);
4855 	*(pkt->pkt_scbp) = scsi_status;
4856 
4857 	if (loginfo == 0x31170000) {
4858 		/*
4859 		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
4860 		 * 0x31170000 comes, that means the device missing delay
4861 		 * is in progressing, the command need retry later.
4862 		 */
4863 		*(pkt->pkt_scbp) = STATUS_BUSY;
4864 		return;
4865 	}
4866 
4867 	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
4868 	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
4869 	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
4870 		pkt->pkt_reason = CMD_INCOMPLETE;
4871 		pkt->pkt_state |= STATE_GOT_BUS;
4872 		if (ptgt->m_reset_delay == 0) {
4873 			mptsas_set_throttle(mpt, ptgt,
4874 			    DRAIN_THROTTLE);
4875 		}
4876 		return;
4877 	}
4878 
4879 	switch (scsi_status) {
4880 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
4881 		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
4882 		arqstat = (void*)(pkt->pkt_scbp);
4883 		arqstat->sts_rqpkt_status = *((struct scsi_status *)
4884 		    (pkt->pkt_scbp));
4885 		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
4886 		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
4887 		if (cmd->cmd_flags & CFLAG_XARQ) {
4888 			pkt->pkt_state |= STATE_XARQ_DONE;
4889 		}
4890 		if (pkt->pkt_resid != cmd->cmd_dmacount) {
4891 			pkt->pkt_state |= STATE_XFERRED_DATA;
4892 		}
4893 		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
4894 		arqstat->sts_rqpkt_state  = pkt->pkt_state;
4895 		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
4896 		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
4897 		sensedata = (uint8_t *)&arqstat->sts_sensedata;
4898 
4899 		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
4900 		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
4901 		    cmd->cmd_rqslen));
4902 		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
4903 		cmd->cmd_flags |= CFLAG_CMDARQ;
4904 		/*
4905 		 * Set proper status for pkt if autosense was valid
4906 		 */
4907 		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
4908 			struct scsi_status zero_status = { 0 };
4909 			arqstat->sts_rqpkt_status = zero_status;
4910 		}
4911 
4912 		/*
4913 		 * ASC=0x47 is parity error
4914 		 * ASC=0x48 is initiator detected error received
4915 		 */
4916 		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
4917 		    ((scsi_sense_asc(sensedata) == 0x47) ||
4918 		    (scsi_sense_asc(sensedata) == 0x48))) {
4919 			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
4920 		}
4921 
4922 		/*
4923 		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
4924 		 * ASC/ASCQ=0x25/0x00 means invalid lun
4925 		 */
4926 		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
4927 		    (scsi_sense_asc(sensedata) == 0x3F) &&
4928 		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
4929 		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
4930 		    (scsi_sense_asc(sensedata) == 0x25) &&
4931 		    (scsi_sense_ascq(sensedata) == 0x00))) {
4932 			mptsas_topo_change_list_t *topo_node = NULL;
4933 
4934 			topo_node = kmem_zalloc(
4935 			    sizeof (mptsas_topo_change_list_t),
4936 			    KM_NOSLEEP);
4937 			if (topo_node == NULL) {
4938 				mptsas_log(mpt, CE_NOTE, "No memory"
4939 				    "resource for handle SAS dynamic"
4940 				    "reconfigure.\n");
4941 				break;
4942 			}
4943 			topo_node->mpt = mpt;
4944 			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
4945 			topo_node->un.phymask = ptgt->m_phymask;
4946 			topo_node->devhdl = ptgt->m_devhdl;
4947 			topo_node->object = (void *)ptgt;
4948 			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
4949 
4950 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
4951 			    mptsas_handle_dr,
4952 			    (void *)topo_node,
4953 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
4954 				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
4955 				    "for handle SAS dynamic reconfigure"
4956 				    "failed. \n");
4957 			}
4958 		}
4959 		break;
4960 	case MPI2_SCSI_STATUS_GOOD:
4961 		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
4962 		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4963 			pkt->pkt_reason = CMD_DEV_GONE;
4964 			pkt->pkt_state |= STATE_GOT_BUS;
4965 			if (ptgt->m_reset_delay == 0) {
4966 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
4967 			}
4968 			NDBG31(("lost disk for target%d, command:%x",
4969 			    Tgt(cmd), pkt->pkt_cdbp[0]));
4970 			break;
4971 		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
4972 			NDBG31(("data overrun: xferred=%d", xferred));
4973 			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
4974 			pkt->pkt_reason = CMD_DATA_OVR;
4975 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
4976 			    | STATE_SENT_CMD | STATE_GOT_STATUS
4977 			    | STATE_XFERRED_DATA);
4978 			pkt->pkt_resid = 0;
4979 			break;
4980 		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4981 		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
4982 			NDBG31(("data underrun: xferred=%d", xferred));
4983 			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
4984 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
4985 			    | STATE_SENT_CMD | STATE_GOT_STATUS);
4986 			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
4987 			if (pkt->pkt_resid != cmd->cmd_dmacount) {
4988 				pkt->pkt_state |= STATE_XFERRED_DATA;
4989 			}
4990 			break;
4991 		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
4992 			mptsas_set_pkt_reason(mpt,
4993 			    cmd, CMD_RESET, STAT_BUS_RESET);
4994 			break;
4995 		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
4996 		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
4997 			mptsas_set_pkt_reason(mpt,
4998 			    cmd, CMD_RESET, STAT_DEV_RESET);
4999 			break;
5000 		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5001 		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5002 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5003 			mptsas_set_pkt_reason(mpt,
5004 			    cmd, CMD_TERMINATED, STAT_TERMINATED);
5005 			break;
5006 		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5007 		case MPI2_IOCSTATUS_BUSY:
5008 			/*
5009 			 * set throttles to drain
5010 			 */
5011 			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5012 			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
5013 			while (ptgt != NULL) {
5014 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5015 
5016 				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5017 				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
5018 			}
5019 
5020 			/*
5021 			 * retry command
5022 			 */
5023 			cmd->cmd_flags |= CFLAG_RETRY;
5024 			cmd->cmd_pkt_flags |= FLAG_HEAD;
5025 
5026 			(void) mptsas_accept_pkt(mpt, cmd);
5027 			break;
5028 		default:
5029 			mptsas_log(mpt, CE_WARN,
5030 			    "unknown ioc_status = %x\n", ioc_status);
5031 			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5032 			    "count = %x, scsi_status = %x", scsi_state,
5033 			    xferred, scsi_status);
5034 			break;
5035 		}
5036 		break;
5037 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
5038 		mptsas_handle_qfull(mpt, cmd);
5039 		break;
5040 	case MPI2_SCSI_STATUS_BUSY:
5041 		NDBG31(("scsi_status busy received"));
5042 		break;
5043 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5044 		NDBG31(("scsi_status reservation conflict received"));
5045 		break;
5046 	default:
5047 		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5048 		    scsi_status, ioc_status);
5049 		mptsas_log(mpt, CE_WARN,
5050 		    "mptsas_process_intr: invalid scsi status\n");
5051 		break;
5052 	}
5053 }
5054 
5055 static void
5056 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5057 	mptsas_cmd_t *cmd)
5058 {
5059 	uint8_t		task_type;
5060 	uint16_t	ioc_status;
5061 	uint32_t	log_info;
5062 	uint16_t	dev_handle;
5063 	struct scsi_pkt *pkt = CMD2PKT(cmd);
5064 
5065 	task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5066 	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5067 	log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5068 	dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5069 
5070 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5071 		mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5072 		    "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5073 		    task_type, ioc_status, log_info, dev_handle);
5074 		pkt->pkt_reason = CMD_INCOMPLETE;
5075 		return;
5076 	}
5077 
5078 	switch (task_type) {
5079 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5080 	case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5081 	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5082 	case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5083 	case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5084 	case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5085 		break;
5086 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5087 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5088 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5089 		mptsas_flush_target(mpt, dev_handle, Lun(cmd), task_type);
5090 		break;
5091 	default:
5092 		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5093 		    task_type);
5094 		mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5095 		break;
5096 	}
5097 }
5098 
/*
 * Helper thread body: drains the per-thread done queue at index arg->t,
 * completing each command's SCSI packet.  Runs until the thread's
 * MPTSAS_DONEQ_THREAD_ACTIVE flag is cleared, then decrements the
 * active-thread count and wakes anyone waiting for thread shutdown.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/* Sleep until work arrives (or the thread is told to exit). */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * NOTE(review): the list mutex is dropped around the
		 * completion call — presumably pkt_comp must not run with
		 * this lock held (it may re-enter the driver); confirm
		 * against mptsas_pkt_comp.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Thread is exiting: account for it and wake shutdown waiters. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5130 
5131 
5132 /*
5133  * mpt interrupt handler.
5134  */
5135 static uint_t
5136 mptsas_intr(caddr_t arg1, caddr_t arg2)
5137 {
5138 	mptsas_t			*mpt = (void *)arg1;
5139 	uint32_t			reply_index;
5140 	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
5141 	uchar_t				did_reply = FALSE;
5142 
5143 	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5144 
5145 	mutex_enter(&mpt->m_mutex);
5146 
5147 	/*
5148 	 * If interrupts are shared by two channels then
5149 	 * check whether this interrupt is genuinely for this
5150 	 * channel by making sure first the chip is in high
5151 	 * power state.
5152 	 */
5153 	if ((mpt->m_options & MPTSAS_OPT_PM) &&
5154 	    (mpt->m_power_level != PM_LEVEL_D0)) {
5155 		mutex_exit(&mpt->m_mutex);
5156 		return (DDI_INTR_UNCLAIMED);
5157 	}
5158 
5159 	/*
5160 	 * Save the current reply post host index value.
5161 	 */
5162 	reply_index = mpt->m_post_index;
5163 
5164 	/*
5165 	 * Read the istat register.
5166 	 */
5167 	if ((INTPENDING(mpt)) != 0) {
5168 		/*
5169 		 * read fifo until empty.
5170 		 */
5171 		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5172 		    DDI_DMA_SYNC_FORCPU);
5173 #ifndef __lock_lint
5174 		_NOTE(CONSTCOND)
5175 #endif
5176 		while (TRUE) {
5177 			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5178 			    MPTSAS_GET_NEXT_REPLY(mpt, reply_index);
5179 
5180 			if (ddi_get32(mpt->m_acc_post_queue_hdl,
5181 			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5182 			    ddi_get32(mpt->m_acc_post_queue_hdl,
5183 			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5184 				break;
5185 			}
5186 
5187 			/*
5188 			 * The reply is valid, process it according to its
5189 			 * type.  Also, set a flag for updated the reply index
5190 			 * after they've all been processed.
5191 			 */
5192 			did_reply = TRUE;
5193 
5194 			mptsas_process_intr(mpt, reply_desc_union);
5195 
5196 			if (++reply_index == mpt->m_post_queue_depth) {
5197 				reply_index = 0;
5198 			}
5199 			mpt->m_post_index = reply_index;
5200 		}
5201 
5202 		/*
5203 		 * Update the global reply index if at least one reply was
5204 		 * processed.
5205 		 */
5206 		if (did_reply) {
5207 			ddi_put32(mpt->m_datap,
5208 			    &mpt->m_reg->ReplyPostHostIndex, reply_index);
5209 		}
5210 	} else {
5211 		if (mpt->m_polled_intr) {
5212 			mpt->m_polled_intr = 0;
5213 			mutex_exit(&mpt->m_mutex);
5214 			return (DDI_INTR_CLAIMED);
5215 		}
5216 		mutex_exit(&mpt->m_mutex);
5217 		return (DDI_INTR_UNCLAIMED);
5218 	}
5219 	NDBG1(("mptsas_intr complete"));
5220 
5221 	/*
5222 	 * If no helper threads are created, process the doneq in ISR.
5223 	 * If helpers are created, use the doneq length as a metric to
5224 	 * measure the load on the interrupt CPU. If it is long enough,
5225 	 * which indicates the load is heavy, then we deliver the IO
5226 	 * completions to the helpers.
5227 	 * this measurement has some limitations although, it is simple
5228 	 * and straightforward and works well for most of the cases at
5229 	 * present.
5230 	 */
5231 
5232 	if (!mpt->m_doneq_thread_n ||
5233 	    (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5234 		mptsas_doneq_empty(mpt);
5235 	} else {
5236 		mptsas_deliver_doneq_thread(mpt);
5237 	}
5238 
5239 	/*
5240 	 * If there are queued cmd, start them now.
5241 	 */
5242 	if (mpt->m_waitq != NULL) {
5243 		mptsas_restart_waitq(mpt);
5244 	}
5245 
5246 	if (mpt->m_polled_intr) {
5247 		mpt->m_polled_intr = 0;
5248 	}
5249 
5250 	mutex_exit(&mpt->m_mutex);
5251 	return (DDI_INTR_CLAIMED);
5252 }
5253 
5254 static void
5255 mptsas_process_intr(mptsas_t *mpt,
5256     pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5257 {
5258 	uint8_t	reply_type;
5259 
5260 	ASSERT(mutex_owned(&mpt->m_mutex));
5261 
5262 	/*
5263 	 * The reply is valid, process it according to its
5264 	 * type.  Also, set a flag for updated the reply index
5265 	 * after they've all been processed.
5266 	 */
5267 	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5268 	    &reply_desc_union->Default.ReplyFlags);
5269 	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5270 	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5271 		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5272 	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5273 		mptsas_handle_address_reply(mpt, reply_desc_union);
5274 	} else {
5275 		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5276 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5277 	}
5278 
5279 	/*
5280 	 * Clear the reply descriptor for re-use and increment
5281 	 * index.
5282 	 */
5283 	ddi_put64(mpt->m_acc_post_queue_hdl,
5284 	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5285 	    0xFFFFFFFFFFFFFFFF);
5286 	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5287 	    DDI_DMA_SYNC_FORDEV);
5288 }
5289 
5290 /*
5291  * handle qfull condition
5292  */
5293 static void
5294 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5295 {
5296 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
5297 
5298 	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5299 	    (ptgt->m_qfull_retries == 0)) {
5300 		/*
5301 		 * We have exhausted the retries on QFULL, or,
5302 		 * the target driver has indicated that it
5303 		 * wants to handle QFULL itself by setting
5304 		 * qfull-retries capability to 0. In either case
5305 		 * we want the target driver's QFULL handling
5306 		 * to kick in. We do this by having pkt_reason
5307 		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5308 		 */
5309 		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5310 	} else {
5311 		if (ptgt->m_reset_delay == 0) {
5312 			ptgt->m_t_throttle =
5313 			    max((ptgt->m_t_ncmds - 2), 0);
5314 		}
5315 
5316 		cmd->cmd_pkt_flags |= FLAG_HEAD;
5317 		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5318 		cmd->cmd_flags |= CFLAG_RETRY;
5319 
5320 		(void) mptsas_accept_pkt(mpt, cmd);
5321 
5322 		/*
5323 		 * when target gives queue full status with no commands
5324 		 * outstanding (m_t_ncmds == 0), throttle is set to 0
5325 		 * (HOLD_THROTTLE), and the queue full handling start
5326 		 * (see psarc/1994/313); if there are commands outstanding,
5327 		 * throttle is set to (m_t_ncmds - 2)
5328 		 */
5329 		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5330 			/*
5331 			 * By setting throttle to QFULL_THROTTLE, we
5332 			 * avoid submitting new commands and in
5333 			 * mptsas_restart_cmd find out slots which need
5334 			 * their throttles to be cleared.
5335 			 */
5336 			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5337 			if (mpt->m_restart_cmd_timeid == 0) {
5338 				mpt->m_restart_cmd_timeid =
5339 				    timeout(mptsas_restart_cmd, mpt,
5340 				    ptgt->m_qfull_retry_interval);
5341 			}
5342 		}
5343 	}
5344 }
5345 
5346 uint8_t
5347 mptsas_phymask_to_physport(mptsas_t *mpt, uint8_t phymask)
5348 {
5349 	int i;
5350 
5351 	/*
5352 	 * RAID doesn't have valid phymask and physport so we use phymask == 0
5353 	 * and physport == 0xff to indicate that it's RAID.
5354 	 */
5355 	if (phymask == 0) {
5356 		return (0xff);
5357 	}
5358 	for (i = 0; i < 8; i++) {
5359 		if (phymask & (1 << i)) {
5360 			break;
5361 		}
5362 	}
5363 	return (mpt->m_phy_info[i].port_num);
5364 }
5365 uint8_t
5366 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5367 {
5368 	uint8_t		phy_mask = 0;
5369 	uint8_t		i = 0;
5370 
5371 	NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5372 
5373 	ASSERT(mutex_owned(&mpt->m_mutex));
5374 
5375 	/*
5376 	 * If physport is 0xFF, this is a RAID volume.  Use phymask of 0.
5377 	 */
5378 	if (physport == 0xFF) {
5379 		return (0);
5380 	}
5381 
5382 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5383 		if (mpt->m_phy_info[i].attached_devhdl &&
5384 		    (mpt->m_phy_info[i].phy_mask != 0) &&
5385 		    (mpt->m_phy_info[i].port_num == physport)) {
5386 			phy_mask = mpt->m_phy_info[i].phy_mask;
5387 			break;
5388 		}
5389 	}
5390 	NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5391 	    mpt->m_instance, physport, phy_mask));
5392 	return (phy_mask);
5393 }
5394 
5395 /*
5396  * mpt free device handle after device gone, by use of passthrough
5397  */
5398 static int
5399 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5400 {
5401 	Mpi2SasIoUnitControlRequest_t	req;
5402 	Mpi2SasIoUnitControlReply_t	rep;
5403 	int				ret;
5404 
5405 	ASSERT(mutex_owned(&mpt->m_mutex));
5406 
5407 	/*
5408 	 * Need to compose a SAS IO Unit Control request message
5409 	 * and call mptsas_do_passthru() function
5410 	 */
5411 	bzero(&req, sizeof (req));
5412 	bzero(&rep, sizeof (rep));
5413 
5414 	req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5415 	req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5416 	req.DevHandle = LE_16(devhdl);
5417 
5418 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5419 	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5420 	if (ret != 0) {
5421 		cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5422 		    "Control error %d", ret);
5423 		return (DDI_FAILURE);
5424 	}
5425 
5426 	/* do passthrough success, check the ioc status */
5427 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5428 		cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5429 		    "Control IOCStatus %d", LE_16(rep.IOCStatus));
5430 		return (DDI_FAILURE);
5431 	}
5432 
5433 	return (DDI_SUCCESS);
5434 }
5435 
5436 static void
5437 mptsas_update_phymask(mptsas_t *mpt)
5438 {
5439 	uint8_t	mask = 0, phy_mask;
5440 	char	*phy_mask_name;
5441 	uint8_t current_port;
5442 	int	i, j;
5443 
5444 	NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5445 
5446 	ASSERT(mutex_owned(&mpt->m_mutex));
5447 
5448 	(void) mptsas_get_sas_io_unit_page(mpt);
5449 
5450 	phy_mask_name = kmem_zalloc(8, KM_SLEEP);
5451 
5452 	for (i = 0; i < mpt->m_num_phys; i++) {
5453 		phy_mask = 0x00;
5454 
5455 		if (mpt->m_phy_info[i].attached_devhdl == 0)
5456 			continue;
5457 
5458 		bzero(phy_mask_name, sizeof (phy_mask_name));
5459 
5460 		current_port = mpt->m_phy_info[i].port_num;
5461 
5462 		if ((mask & (1 << i)) != 0)
5463 			continue;
5464 
5465 		for (j = 0; j < mpt->m_num_phys; j++) {
5466 			if (mpt->m_phy_info[j].attached_devhdl &&
5467 			    (mpt->m_phy_info[j].port_num == current_port)) {
5468 				phy_mask |= (1 << j);
5469 			}
5470 		}
5471 		mask = mask | phy_mask;
5472 
5473 		for (j = 0; j < mpt->m_num_phys; j++) {
5474 			if ((phy_mask >> j) & 0x01) {
5475 				mpt->m_phy_info[j].phy_mask = phy_mask;
5476 			}
5477 		}
5478 
5479 		(void) sprintf(phy_mask_name, "%x", phy_mask);
5480 
5481 		mutex_exit(&mpt->m_mutex);
5482 		/*
5483 		 * register a iport, if the port has already been existed
5484 		 * SCSA will do nothing and just return.
5485 		 */
5486 		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5487 		mutex_enter(&mpt->m_mutex);
5488 	}
5489 	kmem_free(phy_mask_name, 8);
5490 	NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5491 }
5492 
5493 /*
5494  * mptsas_handle_dr is a task handler for DR, the DR action includes:
5495  * 1. Directly attched Device Added/Removed.
5496  * 2. Expander Device Added/Removed.
5497  * 3. Indirectly Attached Device Added/Expander.
5498  * 4. LUNs of a existing device status change.
5499  * 5. RAID volume created/deleted.
5500  * 6. Member of RAID volume is released because of RAID deletion.
5501  * 7. Physical disks are removed because of RAID creation.
5502  */
static void
mptsas_handle_dr(void *args) {
	mptsas_topo_change_list_t	*topo_node = NULL;
	mptsas_topo_change_list_t	*save_node = NULL;
	mptsas_t			*mpt;
	dev_info_t			*parent = NULL;
	uint8_t				phymask = 0;
	char				*phy_mask_name;
	uint8_t				flags = 0, physport = 0xff;
	uint8_t				port_update = 0;
	uint_t				event;

	/* args is the head of a list of topology change nodes. */
	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	/* Scratch buffer for the iport name ("%x" of a uint8_t, or "v0"). */
	phy_mask_name = kmem_zalloc(8, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	/*
	 * Decide whether this event may change the initiator port layout
	 * and therefore requires a phymask refresh below.
	 */
	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system, it must be the head
		 * of topo_change_list_t
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases with port_update == 1 may cause an initiator port
	 * formation change.
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of the initiator
		 * port have changed to online. So when an expander-added or
		 * directly-attached-device-online event comes, we force an
		 * update of the port information by issuing a SAS IO Unit
		 * Page request and updating the PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * Walk the topology change list, locating the parent iport for
	 * each node and applying the change.  Each node is freed after
	 * it is handled.
	 */
	while (topo_node) {
		phymask = 0;
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no field in the IR_CONFIG_CHANGE
				 * event that indicates physport/phynum, so
				 * get the parent after the SAS Device Page0
				 * request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If the direct attached device added or a
				 * phys disk is being unhidden, argument
				 * physport actually is PHY#, so we have to get
				 * phymask according PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, 8);
			/*
			 * For RAID topology change node, write the iport name
			 * as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can be 0 if the drive has been
				 * pulled by the time an add event is
				 * processed.  If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);

		mptsas_handle_topo_change(topo_node, parent);
		/* Node has been handled; unlink and free it. */
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If direct attached device associated, make sure
			 * reset the parent before start the next one. But
			 * all devices associated with expander shares the
			 * parent.  Also, reset parent if this is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, 8);
}
5682 
5683 static void
5684 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
5685     dev_info_t *parent)
5686 {
5687 	mptsas_target_t	*ptgt = NULL;
5688 	mptsas_smp_t	*psmp = NULL;
5689 	mptsas_t	*mpt = (void *)topo_node->mpt;
5690 	uint16_t	devhdl;
5691 	uint64_t	sas_wwn = 0;
5692 	int		rval = 0;
5693 	uint32_t	page_address;
5694 	uint8_t		phy, flags;
5695 	char		*addr = NULL;
5696 	dev_info_t	*lundip;
5697 	int		circ = 0, circ1 = 0;
5698 
5699 	NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));
5700 
5701 	ASSERT(mutex_owned(&mpt->m_mutex));
5702 
5703 	switch (topo_node->event) {
5704 	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
5705 	{
5706 		char *phy_mask_name;
5707 		uint8_t phymask = 0;
5708 
5709 		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5710 			/*
5711 			 * Get latest RAID info.
5712 			 */
5713 			(void) mptsas_get_raid_info(mpt);
5714 			ptgt = mptsas_search_by_devhdl(
5715 			    &mpt->m_active->m_tgttbl, topo_node->devhdl);
5716 			if (ptgt == NULL)
5717 				break;
5718 		} else {
5719 			ptgt = (void *)topo_node->object;
5720 		}
5721 
5722 		if (ptgt == NULL) {
5723 			/*
5724 			 * Get sas device page 0 by DevHandle to make sure if
5725 			 * SSP/SATA end device exist.
5726 			 */
5727 			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
5728 			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
5729 			    topo_node->devhdl;
5730 
5731 			rval = mptsas_get_target_device_info(mpt, page_address,
5732 			    &devhdl, &ptgt);
5733 			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
5734 				mptsas_log(mpt, CE_NOTE,
5735 				    "mptsas_handle_topo_change: target %d is "
5736 				    "not a SAS/SATA device. \n",
5737 				    topo_node->devhdl);
5738 			} else if (rval == DEV_INFO_FAIL_ALLOC) {
5739 				mptsas_log(mpt, CE_NOTE,
5740 				    "mptsas_handle_topo_change: could not "
5741 				    "allocate memory. \n");
5742 			}
5743 			/*
5744 			 * If rval is DEV_INFO_PHYS_DISK than there is nothing
5745 			 * else to do, just leave.
5746 			 */
5747 			if (rval != DEV_INFO_SUCCESS) {
5748 				return;
5749 			}
5750 		}
5751 
5752 		ASSERT(ptgt->m_devhdl == topo_node->devhdl);
5753 
5754 		mutex_exit(&mpt->m_mutex);
5755 		flags = topo_node->flags;
5756 
5757 		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
5758 			phymask = ptgt->m_phymask;
5759 			phy_mask_name = kmem_zalloc(8, KM_SLEEP);
5760 			(void) sprintf(phy_mask_name, "%x", phymask);
5761 			parent = scsi_hba_iport_find(mpt->m_dip,
5762 			    phy_mask_name);
5763 			kmem_free(phy_mask_name, 8);
5764 			if (parent == NULL) {
5765 				mptsas_log(mpt, CE_WARN, "Failed to find a "
5766 				    "iport for PD, should not happen!");
5767 				mutex_enter(&mpt->m_mutex);
5768 				break;
5769 			}
5770 		}
5771 
5772 		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5773 			ndi_devi_enter(parent, &circ1);
5774 			(void) mptsas_config_raid(parent, topo_node->devhdl,
5775 			    &lundip);
5776 			ndi_devi_exit(parent, circ1);
5777 		} else {
5778 			/*
5779 			 * hold nexus for bus configure
5780 			 */
5781 			ndi_devi_enter(scsi_vhci_dip, &circ);
5782 			ndi_devi_enter(parent, &circ1);
5783 			rval = mptsas_config_target(parent, ptgt);
5784 			/*
5785 			 * release nexus for bus configure
5786 			 */
5787 			ndi_devi_exit(parent, circ1);
5788 			ndi_devi_exit(scsi_vhci_dip, circ);
5789 
5790 		}
5791 		mutex_enter(&mpt->m_mutex);
5792 
5793 		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
5794 		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
5795 		    ptgt->m_phymask));
5796 		break;
5797 	}
5798 	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
5799 	{
5800 		mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
5801 		devhdl = topo_node->devhdl;
5802 		ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
5803 		if (ptgt == NULL)
5804 			break;
5805 
5806 		sas_wwn = ptgt->m_sas_wwn;
5807 		phy = ptgt->m_phynum;
5808 
5809 		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
5810 
5811 		if (sas_wwn) {
5812 			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
5813 		} else {
5814 			(void) sprintf(addr, "p%x", phy);
5815 		}
5816 		ASSERT(ptgt->m_devhdl == devhdl);
5817 
5818 		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5819 			/*
5820 			 * Get latest RAID info, if RAID volume status change
5821 			 */
5822 			(void) mptsas_get_raid_info(mpt);
5823 		}
5824 		/*
5825 		 * Abort all outstanding command on the device
5826 		 */
5827 		rval = mptsas_do_scsi_reset(mpt, devhdl);
5828 		if (rval) {
5829 			NDBG20(("mptsas%d handle_topo_change to reset target "
5830 			    "before offline devhdl:%x, phymask:%x, rval:%x",
5831 			    mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
5832 			    rval));
5833 		}
5834 
5835 		mutex_exit(&mpt->m_mutex);
5836 
5837 		ndi_devi_enter(scsi_vhci_dip, &circ);
5838 		ndi_devi_enter(parent, &circ1);
5839 		rval = mptsas_offline_target(parent, addr);
5840 		ndi_devi_exit(parent, circ1);
5841 		ndi_devi_exit(scsi_vhci_dip, circ);
5842 		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
5843 		    "phymask:%x, rval:%x", mpt->m_instance,
5844 		    ptgt->m_devhdl, ptgt->m_phymask, rval));
5845 
5846 		kmem_free(addr, SCSI_MAXNAMELEN);
5847 
5848 		mutex_enter(&mpt->m_mutex);
5849 		if (rval == DDI_SUCCESS) {
5850 			mptsas_tgt_free(&mpt->m_active->m_tgttbl,
5851 			    ptgt->m_sas_wwn, ptgt->m_phymask);
5852 			ptgt = NULL;
5853 		} else {
5854 			/*
5855 			 * clean DR_INTRANSITION flag to allow I/O down to
5856 			 * PHCI driver since failover finished.
5857 			 * Invalidate the devhdl
5858 			 */
5859 			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
5860 			mutex_enter(&mpt->m_tx_waitq_mutex);
5861 			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
5862 			mutex_exit(&mpt->m_tx_waitq_mutex);
5863 		}
5864 
5865 		/*
5866 		 * Send SAS IO Unit Control to free the dev handle
5867 		 */
5868 		flags = topo_node->flags;
5869 		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5870 		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
5871 			rval = mptsas_free_devhdl(mpt, devhdl);
5872 
5873 			NDBG20(("mptsas%d handle_topo_change to remove "
5874 			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
5875 			    rval));
5876 		}
5877 		break;
5878 	}
5879 	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
5880 	{
5881 		devhdl = topo_node->devhdl;
5882 		/*
5883 		 * If this is the remove handle event, do a reset first.
5884 		 */
5885 		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
5886 			rval = mptsas_do_scsi_reset(mpt, devhdl);
5887 			if (rval) {
5888 				NDBG20(("mpt%d reset target before remove "
5889 				    "devhdl:%x, rval:%x", mpt->m_instance,
5890 				    devhdl, rval));
5891 			}
5892 		}
5893 
5894 		/*
5895 		 * Send SAS IO Unit Control to free the dev handle
5896 		 */
5897 		rval = mptsas_free_devhdl(mpt, devhdl);
5898 		NDBG20(("mptsas%d handle_topo_change to remove "
5899 		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
5900 		    rval));
5901 		break;
5902 	}
5903 	case MPTSAS_DR_EVENT_RECONFIG_SMP:
5904 	{
5905 		mptsas_smp_t smp;
5906 		dev_info_t *smpdip;
5907 		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
5908 
5909 		devhdl = topo_node->devhdl;
5910 
5911 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
5912 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
5913 		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
5914 		if (rval != DDI_SUCCESS) {
5915 			mptsas_log(mpt, CE_WARN, "failed to online smp, "
5916 			    "handle %x", devhdl);
5917 			return;
5918 		}
5919 
5920 		psmp = mptsas_smp_alloc(smptbl, &smp);
5921 		if (psmp == NULL) {
5922 			return;
5923 		}
5924 
5925 		mutex_exit(&mpt->m_mutex);
5926 		ndi_devi_enter(parent, &circ1);
5927 		(void) mptsas_online_smp(parent, psmp, &smpdip);
5928 		ndi_devi_exit(parent, circ1);
5929 		mutex_enter(&mpt->m_mutex);
5930 		break;
5931 	}
5932 	case MPTSAS_DR_EVENT_OFFLINE_SMP:
5933 	{
5934 		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
5935 		devhdl = topo_node->devhdl;
5936 		psmp = mptsas_search_by_devhdl(smptbl, devhdl);
5937 		if (psmp == NULL)
5938 			break;
5939 		/*
5940 		 * The mptsas_smp_t data is released only if the dip is offlined
5941 		 * successfully.
5942 		 */
5943 		mutex_exit(&mpt->m_mutex);
5944 		ndi_devi_enter(parent, &circ1);
5945 		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
5946 		ndi_devi_exit(parent, circ1);
5947 		mutex_enter(&mpt->m_mutex);
5948 		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
5949 		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
5950 		if (rval == DDI_SUCCESS) {
5951 			mptsas_smp_free(smptbl, psmp->m_sasaddr,
5952 			    psmp->m_phymask);
5953 		} else {
5954 			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
5955 		}
5956 		break;
5957 	}
5958 	default:
5959 		return;
5960 	}
5961 }
5962 
5963 /*
5964  * Record the event if its type is enabled in mpt instance by ioctl.
5965  */
5966 static void
5967 mptsas_record_event(void *args)
5968 {
5969 	m_replyh_arg_t			*replyh_arg;
5970 	pMpi2EventNotificationReply_t	eventreply;
5971 	uint32_t			event, rfm;
5972 	mptsas_t			*mpt;
5973 	int				i, j;
5974 	uint16_t			event_data_len;
5975 	boolean_t			sendAEN = FALSE;
5976 
5977 	replyh_arg = (m_replyh_arg_t *)args;
5978 	rfm = replyh_arg->rfm;
5979 	mpt = replyh_arg->mpt;
5980 
5981 	eventreply = (pMpi2EventNotificationReply_t)
5982 	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
5983 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
5984 
5985 
5986 	/*
5987 	 * Generate a system event to let anyone who cares know that a
5988 	 * LOG_ENTRY_ADDED event has occurred.  This is sent no matter what the
5989 	 * event mask is set to.
5990 	 */
5991 	if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
5992 		sendAEN = TRUE;
5993 	}
5994 
5995 	/*
5996 	 * Record the event only if it is not masked.  Determine which dword
5997 	 * and bit of event mask to test.
5998 	 */
5999 	i = (uint8_t)(event / 32);
6000 	j = (uint8_t)(event % 32);
6001 	if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6002 		i = mpt->m_event_number;
6003 		mpt->m_events[i].Type = event;
6004 		mpt->m_events[i].Number = ++mpt->m_event_number;
6005 		bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6006 		event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6007 		    &eventreply->EventDataLength);
6008 
6009 		if (event_data_len > 0) {
6010 			/*
6011 			 * Limit data to size in m_event entry
6012 			 */
6013 			if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6014 				event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6015 			}
6016 			for (j = 0; j < event_data_len; j++) {
6017 				mpt->m_events[i].Data[j] =
6018 				    ddi_get32(mpt->m_acc_reply_frame_hdl,
6019 				    &(eventreply->EventData[j]));
6020 			}
6021 
6022 			/*
6023 			 * check for index wrap-around
6024 			 */
6025 			if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6026 				i = 0;
6027 			}
6028 			mpt->m_event_number = i;
6029 
6030 			/*
6031 			 * Set flag to send the event.
6032 			 */
6033 			sendAEN = TRUE;
6034 		}
6035 	}
6036 
6037 	/*
6038 	 * Generate a system event if flag is set to let anyone who cares know
6039 	 * that an event has occurred.
6040 	 */
6041 	if (sendAEN) {
6042 		(void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6043 		    "SAS", NULL, NULL, DDI_NOSLEEP);
6044 	}
6045 }
6046 
6047 #define	SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6048 /*
6049  * handle sync events from ioc in interrupt
6050  * return value:
6051  * DDI_SUCCESS: The event is handled by this func
6052  * DDI_FAILURE: Event is not handled
6053  */
static int
mptsas_handle_event_sync(void *args)
{
	m_replyh_arg_t			*replyh_arg;
	pMpi2EventNotificationReply_t	eventreply;
	uint32_t			event, rfm;
	mptsas_t			*mpt;
	uint_t				iocstatus;

	replyh_arg = (m_replyh_arg_t *)args;
	rfm = replyh_arg->rfm;
	mpt = replyh_arg->mpt;

	/* Called in interrupt context with the per-instance mutex held. */
	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Translate the reply frame's DMA address into a kernel virtual
	 * pointer within the reply frame pool.
	 */
	eventreply = (pMpi2EventNotificationReply_t)
	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);

	/* Assignment in the condition is intentional: log non-zero status. */
	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
	    &eventreply->IOCStatus)) {
		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
			mptsas_log(mpt, CE_WARN,
			    "!mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		} else {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		}
	}

	/*
	 * figure out what kind of event we got and handle accordingly
	 */
	switch (event) {
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		pMpi2EventDataSasTopologyChangeList_t	sas_topo_change_list;
		uint8_t				num_entries, expstatus, phy;
		uint8_t				phystatus, physport, state, i;
		uint8_t				start_phy_num, link_rate;
		uint16_t			dev_handle;
		uint16_t			enc_handle, expd_handle;
		char				string[80], curr[80], prev[80];
		mptsas_topo_change_list_t	*topo_head = NULL;
		mptsas_topo_change_list_t	*topo_tail = NULL;
		mptsas_topo_change_list_t	*topo_node = NULL;
		mptsas_target_t			*ptgt;
		mptsas_smp_t			*psmp;
		mptsas_hash_table_t		*tgttbl, *smptbl;
		uint8_t				flags = 0, exp_flag;

		NDBG20(("mptsas_handle_event_sync: SAS topology change"));

		tgttbl = &mpt->m_active->m_tgttbl;
		smptbl = &mpt->m_active->m_smptbl;

		sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
		    eventreply->EventData;

		/* Pull the event header fields out of the reply frame. */
		enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->EnclosureHandle);
		expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpanderDevHandle);
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->NumEntries);
		start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->StartPhyNum);
		expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpStatus);
		physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->PhysicalPort);

		string[0] = 0;
		/*
		 * A non-zero expander handle means the change happened behind
		 * an expander; queue expander online/offline work as needed.
		 */
		if (expd_handle) {
			flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
			switch (expstatus) {
			case MPI2_EVENT_SAS_TOPO_ES_ADDED:
				(void) sprintf(string, " added");
				/*
				 * New expander device added
				 */
				mpt->m_port_chng = 1;
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
				topo_node->un.physport = physport;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				/* Append to the tail of the topo work list. */
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "removed");
				psmp = mptsas_search_by_devhdl(smptbl,
				    expd_handle);
				/* Unknown expander: nothing to offline. */
				if (psmp == NULL)
					break;

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = psmp->m_phymask;
				topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
				break;
			case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "delaying removal");
				break;
			default:
				break;
			}
		} else {
			flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
		}

		NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
		    enc_handle, expd_handle, string));
		/* Walk each PHY entry reported in this event. */
		for (i = 0; i < num_entries; i++) {
			phy = i + start_phy_num;
			phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].PhyStatus);
			dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].AttachedDevHandle);
			if (phystatus & MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) {
				continue;
			}
			curr[0] = 0;
			prev[0] = 0;
			string[0] = 0;
			switch (phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK) {
			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d added", mpt->m_instance, phy,
				    physport, dev_handle));
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				/* Decode the current negotiated link state. */
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at 6.0 "
					    "Gbps");
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}
				/*
				 * New target device added into the system.
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				if (expd_handle == 0) {
					/*
					 * Per MPI 2, if expander dev handle
					 * is 0, it's a directly attached
					 * device. So driver use PHY to decide
					 * which iport is associated
					 */
					physport = phy;
					mpt->m_port_chng = 1;
				}
				topo_node->un.physport = physport;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d removed", mpt->m_instance,
				    phy, physport, dev_handle));
				/*
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				/*
				 * Target device was removed from the system;
				 * queue the offline work before the device is
				 * actually offlined from the system.
				 */
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    dev_handle);
				/*
				 * If ptgt is NULL here, it means that the
				 * DevHandle is not in the hash table.  This is
				 * reasonable sometimes.  For example, if a
				 * disk was pulled, then added, then pulled
				 * again, the disk will not have been put into
				 * the hash table because the add event will
				 * have an invalid phymask.  BUT, this does not
				 * mean that the DevHandle is invalid.  The
				 * controller will still have a valid DevHandle
				 * that must be removed.  To do this, use the
				 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
				 */
				if (ptgt == NULL) {
					topo_node = kmem_zalloc(
					    sizeof (mptsas_topo_change_list_t),
					    KM_SLEEP);
					topo_node->mpt = mpt;
					topo_node->un.phymask = 0;
					topo_node->event =
					    MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
					topo_node->devhdl = dev_handle;
					topo_node->flags = flags;
					topo_node->object = NULL;
					if (topo_head == NULL) {
						topo_head = topo_tail =
						    topo_node;
					} else {
						topo_tail->next = topo_node;
						topo_tail = topo_node;
					}
					break;
				}

				/*
				 * Update DR flag immediately to avoid I/O
				 * failure before failover finishes. Pay
				 * attention to the mutex protection: we need
				 * to grab m_tx_waitq_mutex while setting
				 * m_dr_flag because we won't add the
				 * following command into waitq; instead, we
				 * need to return TRAN_BUSY in the tran_start
				 * context.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}

				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
				/*
				 * Link rate change only: build the debug
				 * strings for current and previous states;
				 * no topo work is queued for this case.
				 */
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at "
					    "6.0 Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}

				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(prev, ", was disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(prev, ", was offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(prev, ", was SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(prev, ", was SMP reset "
					    "in progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(prev, ", was online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(prev, ", was online at "
					    "3.0 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(prev, ", was online at "
					    "6.0 Gbps");
					break;
				default:
				break;
				}
				(void) sprintf(&string[strlen(string)], "link "
				    "changed, ");
				break;
			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
				continue;
			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
				(void) sprintf(&string[strlen(string)],
				    "target not responding, delaying "
				    "removal");
				break;
			}
			NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
			    mpt->m_instance, phy, dev_handle, string, curr,
			    prev));
		}
		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
	{
		Mpi2EventDataIrConfigChangeList_t	*irChangeList;
		mptsas_topo_change_list_t		*topo_head = NULL;
		mptsas_topo_change_list_t		*topo_tail = NULL;
		mptsas_topo_change_list_t		*topo_node = NULL;
		mptsas_target_t				*ptgt;
		mptsas_hash_table_t			*tgttbl;
		uint8_t					num_entries, i, reason;
		uint16_t				volhandle, diskhandle;

		irChangeList = (pMpi2EventDataIrConfigChangeList_t)
		    eventreply->EventData;
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &irChangeList->NumElements);

		tgttbl = &mpt->m_active->m_tgttbl;

		NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
		    mpt->m_instance));

		/* Walk each RAID config element in the change list. */
		for (i = 0; i < num_entries; i++) {
			reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].ReasonCode);
			volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].VolDevHandle);
			diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].PhysDiskDevHandle);

			switch (reason) {
			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
			{
				NDBG20(("mptsas %d volume added\n",
				    mpt->m_instance));

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);

				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				/* 0xff: volumes are not tied to a physport. */
				topo_node->un.physport = 0xff;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
			{
				NDBG20(("mptsas %d volume deleted\n",
				    mpt->m_instance));
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    volhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Clear any flags related to volume
				 */
				(void) mptsas_delete_volume(mpt, volhandle);

				/*
				 * Update DR flag immediately to avoid I/O
				 * failure.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			{
				/*
				 * A physical disk was absorbed by an IR
				 * volume; offline its standalone target.
				 */
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    diskhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Update DR flag immediately to avoid I/O
				 * failure.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			{
				/*
				 * The physical drive is released by an IR
				 * volume. But we cannot get the physport
				 * or phynum from the event data, so we can
				 * only get the physport/phynum after a SAS
				 * Device Page0 request for the devhdl.
				 */
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = 0;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = NULL;
				mpt->m_port_chng = 1;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			default:
				break;
			}
		}

		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	default:
		/* Event not handled synchronously; caller handles it later. */
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
6679 
6680 /*
6681  * handle events from ioc
6682  */
6683 static void
6684 mptsas_handle_event(void *args)
6685 {
6686 	m_replyh_arg_t			*replyh_arg;
6687 	pMpi2EventNotificationReply_t	eventreply;
6688 	uint32_t			event, iocloginfo, rfm;
6689 	uint32_t			status, reply_index;
6690 	uint8_t				port;
6691 	mptsas_t			*mpt;
6692 	uint_t				iocstatus;
6693 
6694 	replyh_arg = (m_replyh_arg_t *)args;
6695 	rfm = replyh_arg->rfm;
6696 	mpt = replyh_arg->mpt;
6697 
6698 	mutex_enter(&mpt->m_mutex);
6699 
6700 	eventreply = (pMpi2EventNotificationReply_t)
6701 	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6702 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6703 
6704 	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6705 	    &eventreply->IOCStatus)) {
6706 		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6707 			mptsas_log(mpt, CE_WARN,
6708 			    "!mptsas_handle_event: IOCStatus=0x%x, "
6709 			    "IOCLogInfo=0x%x", iocstatus,
6710 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
6711 			    &eventreply->IOCLogInfo));
6712 		} else {
6713 			mptsas_log(mpt, CE_WARN,
6714 			    "mptsas_handle_event: IOCStatus=0x%x, "
6715 			    "IOCLogInfo=0x%x", iocstatus,
6716 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
6717 			    &eventreply->IOCLogInfo));
6718 		}
6719 	}
6720 
6721 	/*
6722 	 * figure out what kind of event we got and handle accordingly
6723 	 */
6724 	switch (event) {
6725 	case MPI2_EVENT_LOG_ENTRY_ADDED:
6726 		break;
6727 	case MPI2_EVENT_LOG_DATA:
6728 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
6729 		    &eventreply->IOCLogInfo);
6730 		NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
6731 		    iocloginfo));
6732 		break;
6733 	case MPI2_EVENT_STATE_CHANGE:
6734 		NDBG20(("mptsas%d state change.", mpt->m_instance));
6735 		break;
6736 	case MPI2_EVENT_HARD_RESET_RECEIVED:
6737 		NDBG20(("mptsas%d event change.", mpt->m_instance));
6738 		break;
6739 	case MPI2_EVENT_SAS_DISCOVERY:
6740 	{
6741 		MPI2_EVENT_DATA_SAS_DISCOVERY	*sasdiscovery;
6742 		char				string[80];
6743 		uint8_t				rc;
6744 
6745 		sasdiscovery =
6746 		    (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
6747 
6748 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
6749 		    &sasdiscovery->ReasonCode);
6750 		port = ddi_get8(mpt->m_acc_reply_frame_hdl,
6751 		    &sasdiscovery->PhysicalPort);
6752 		status = ddi_get32(mpt->m_acc_reply_frame_hdl,
6753 		    &sasdiscovery->DiscoveryStatus);
6754 
6755 		string[0] = 0;
6756 		switch (rc) {
6757 		case MPI2_EVENT_SAS_DISC_RC_STARTED:
6758 			(void) sprintf(string, "STARTING");
6759 			break;
6760 		case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
6761 			(void) sprintf(string, "COMPLETED");
6762 			break;
6763 		default:
6764 			(void) sprintf(string, "UNKNOWN");
6765 			break;
6766 		}
6767 
6768 		NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
6769 		    port, status));
6770 
6771 		break;
6772 	}
6773 	case MPI2_EVENT_EVENT_CHANGE:
6774 		NDBG20(("mptsas%d event change.", mpt->m_instance));
6775 		break;
6776 	case MPI2_EVENT_TASK_SET_FULL:
6777 	{
6778 		pMpi2EventDataTaskSetFull_t	taskfull;
6779 
6780 		taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
6781 
6782 		NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
6783 		    mpt->m_instance,  ddi_get16(mpt->m_acc_reply_frame_hdl,
6784 		    &taskfull->CurrentDepth)));
6785 		break;
6786 	}
6787 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6788 	{
6789 		/*
6790 		 * SAS TOPOLOGY CHANGE LIST Event has already been handled
6791 		 * in mptsas_handle_event_sync() of interrupt context
6792 		 */
6793 		break;
6794 	}
6795 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
6796 	{
6797 		pMpi2EventDataSasEnclDevStatusChange_t	encstatus;
6798 		uint8_t					rc;
6799 		char					string[80];
6800 
6801 		encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
6802 		    eventreply->EventData;
6803 
6804 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
6805 		    &encstatus->ReasonCode);
6806 		switch (rc) {
6807 		case MPI2_EVENT_SAS_ENCL_RC_ADDED:
6808 			(void) sprintf(string, "added");
6809 			break;
6810 		case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
6811 			(void) sprintf(string, ", not responding");
6812 			break;
6813 		default:
6814 		break;
6815 		}
6816 		NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
6817 		    mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
6818 		    &encstatus->EnclosureHandle), string));
6819 		break;
6820 	}
6821 
6822 	/*
6823 	 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
6824 	 * mptsas_handle_event_sync,in here just send ack message.
6825 	 */
6826 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
6827 	{
6828 		pMpi2EventDataSasDeviceStatusChange_t	statuschange;
6829 		uint8_t					rc;
6830 		uint16_t				devhdl;
6831 		uint64_t				wwn = 0;
6832 		uint32_t				wwn_lo, wwn_hi;
6833 
6834 		statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
6835 		    eventreply->EventData;
6836 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
6837 		    &statuschange->ReasonCode);
6838 		wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
6839 		    (uint32_t *)(void *)&statuschange->SASAddress);
6840 		wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
6841 		    (uint32_t *)(void *)&statuschange->SASAddress + 1);
6842 		wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
6843 		devhdl =  ddi_get16(mpt->m_acc_reply_frame_hdl,
6844 		    &statuschange->DevHandle);
6845 
6846 		NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
6847 		    wwn));
6848 
6849 		switch (rc) {
6850 		case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6851 			NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
6852 			    ddi_get8(mpt->m_acc_reply_frame_hdl,
6853 			    &statuschange->ASC),
6854 			    ddi_get8(mpt->m_acc_reply_frame_hdl,
6855 			    &statuschange->ASCQ)));
6856 			break;
6857 
6858 		case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6859 			NDBG20(("Device not supported"));
6860 			break;
6861 
6862 		case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6863 			NDBG20(("IOC internally generated the Target Reset "
6864 			    "for devhdl:%x", devhdl));
6865 			break;
6866 
6867 		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6868 			NDBG20(("IOC's internally generated Target Reset "
6869 			    "completed for devhdl:%x", devhdl));
6870 			break;
6871 
6872 		case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6873 			NDBG20(("IOC internally generated Abort Task"));
6874 			break;
6875 
6876 		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6877 			NDBG20(("IOC's internally generated Abort Task "
6878 			    "completed"));
6879 			break;
6880 
6881 		case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6882 			NDBG20(("IOC internally generated Abort Task Set"));
6883 			break;
6884 
6885 		case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6886 			NDBG20(("IOC internally generated Clear Task Set"));
6887 			break;
6888 
6889 		case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6890 			NDBG20(("IOC internally generated Query Task"));
6891 			break;
6892 
6893 		case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6894 			NDBG20(("Device sent an Asynchronous Notification"));
6895 			break;
6896 
6897 		default:
6898 			break;
6899 		}
6900 		break;
6901 	}
6902 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
6903 	{
6904 		/*
6905 		 * IR TOPOLOGY CHANGE LIST Event has already been handled
6906 		 * in mpt_handle_event_sync() of interrupt context
6907 		 */
6908 		break;
6909 	}
6910 	case MPI2_EVENT_IR_OPERATION_STATUS:
6911 	{
6912 		Mpi2EventDataIrOperationStatus_t	*irOpStatus;
6913 		char					reason_str[80];
6914 		uint8_t					rc, percent;
6915 		uint16_t				handle;
6916 
6917 		irOpStatus = (pMpi2EventDataIrOperationStatus_t)
6918 		    eventreply->EventData;
6919 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
6920 		    &irOpStatus->RAIDOperation);
6921 		percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
6922 		    &irOpStatus->PercentComplete);
6923 		handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6924 		    &irOpStatus->VolDevHandle);
6925 
6926 		switch (rc) {
6927 			case MPI2_EVENT_IR_RAIDOP_RESYNC:
6928 				(void) sprintf(reason_str, "resync");
6929 				break;
6930 			case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
6931 				(void) sprintf(reason_str, "online capacity "
6932 				    "expansion");
6933 				break;
6934 			case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
6935 				(void) sprintf(reason_str, "consistency check");
6936 				break;
6937 			default:
6938 				(void) sprintf(reason_str, "unknown reason %x",
6939 				    rc);
6940 		}
6941 
6942 		NDBG20(("mptsas%d raid operational status: (%s)"
6943 		    "\thandle(0x%04x), percent complete(%d)\n",
6944 		    mpt->m_instance, reason_str, handle, percent));
6945 		break;
6946 	}
6947 	case MPI2_EVENT_IR_VOLUME:
6948 	{
6949 		Mpi2EventDataIrVolume_t		*irVolume;
6950 		uint16_t			devhandle;
6951 		uint32_t			state;
6952 		int				config, vol;
6953 		mptsas_slots_t			*slots = mpt->m_active;
6954 		uint8_t				found = FALSE;
6955 
6956 		irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
6957 		state = ddi_get32(mpt->m_acc_reply_frame_hdl,
6958 		    &irVolume->NewValue);
6959 		devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6960 		    &irVolume->VolDevHandle);
6961 
6962 		NDBG20(("EVENT_IR_VOLUME event is received"));
6963 
6964 		/*
6965 		 * Get latest RAID info and then find the DevHandle for this
6966 		 * event in the configuration.  If the DevHandle is not found
6967 		 * just exit the event.
6968 		 */
6969 		(void) mptsas_get_raid_info(mpt);
6970 		for (config = 0; config < slots->m_num_raid_configs;
6971 		    config++) {
6972 			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
6973 				if (slots->m_raidconfig[config].m_raidvol[vol].
6974 				    m_raidhandle == devhandle) {
6975 					found = TRUE;
6976 					break;
6977 				}
6978 			}
6979 		}
6980 		if (!found) {
6981 			break;
6982 		}
6983 
6984 		switch (irVolume->ReasonCode) {
6985 		case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
6986 		{
6987 			uint32_t i;
6988 			slots->m_raidconfig[config].m_raidvol[vol].m_settings =
6989 			    state;
6990 
6991 			i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
6992 			mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
6993 			    ", auto-config of hot-swap drives is %s"
6994 			    ", write caching is %s"
6995 			    ", hot-spare pool mask is %02x\n",
6996 			    vol, state &
6997 			    MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
6998 			    ? "disabled" : "enabled",
6999 			    i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7000 			    ? "controlled by member disks" :
7001 			    i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7002 			    ? "disabled" :
7003 			    i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7004 			    ? "enabled" :
7005 			    "incorrectly set",
7006 			    (state >> 16) & 0xff);
7007 				break;
7008 		}
7009 		case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7010 		{
7011 			slots->m_raidconfig[config].m_raidvol[vol].m_state =
7012 			    (uint8_t)state;
7013 
7014 			mptsas_log(mpt, CE_NOTE,
7015 			    "Volume %d is now %s\n", vol,
7016 			    state == MPI2_RAID_VOL_STATE_OPTIMAL
7017 			    ? "optimal" :
7018 			    state == MPI2_RAID_VOL_STATE_DEGRADED
7019 			    ? "degraded" :
7020 			    state == MPI2_RAID_VOL_STATE_ONLINE
7021 			    ? "online" :
7022 			    state == MPI2_RAID_VOL_STATE_INITIALIZING
7023 			    ? "initializing" :
7024 			    state == MPI2_RAID_VOL_STATE_FAILED
7025 			    ? "failed" :
7026 			    state == MPI2_RAID_VOL_STATE_MISSING
7027 			    ? "missing" :
7028 			    "state unknown");
7029 			break;
7030 		}
7031 		case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7032 		{
7033 			slots->m_raidconfig[config].m_raidvol[vol].
7034 			    m_statusflags = state;
7035 
7036 			mptsas_log(mpt, CE_NOTE,
7037 			    " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7038 			    vol,
7039 			    state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7040 			    ? ", enabled" : ", disabled",
7041 			    state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7042 			    ? ", quiesced" : "",
7043 			    state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7044 			    ? ", inactive" : ", active",
7045 			    state &
7046 			    MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7047 			    ? ", bad block table is full" : "",
7048 			    state &
7049 			    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7050 			    ? ", resync in progress" : "",
7051 			    state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7052 			    ? ", background initialization in progress" : "",
7053 			    state &
7054 			    MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7055 			    ? ", capacity expansion in progress" : "",
7056 			    state &
7057 			    MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7058 			    ? ", consistency check in progress" : "",
7059 			    state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7060 			    ? ", data scrub in progress" : "");
7061 			break;
7062 		}
7063 		default:
7064 			break;
7065 		}
7066 		break;
7067 	}
7068 	case MPI2_EVENT_IR_PHYSICAL_DISK:
7069 	{
7070 		Mpi2EventDataIrPhysicalDisk_t	*irPhysDisk;
7071 		uint16_t			devhandle, enchandle, slot;
7072 		uint32_t			status, state;
7073 		uint8_t				physdisknum, reason;
7074 
7075 		irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7076 		    eventreply->EventData;
7077 		physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7078 		    &irPhysDisk->PhysDiskNum);
7079 		devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7080 		    &irPhysDisk->PhysDiskDevHandle);
7081 		enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7082 		    &irPhysDisk->EnclosureHandle);
7083 		slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7084 		    &irPhysDisk->Slot);
7085 		state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7086 		    &irPhysDisk->NewValue);
7087 		reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7088 		    &irPhysDisk->ReasonCode);
7089 
7090 		NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7091 
7092 		switch (reason) {
7093 		case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7094 			mptsas_log(mpt, CE_NOTE,
7095 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7096 			    "for enclosure with handle 0x%x is now in hot "
7097 			    "spare pool %d",
7098 			    physdisknum, devhandle, slot, enchandle,
7099 			    (state >> 16) & 0xff);
7100 			break;
7101 
7102 		case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7103 			status = state;
7104 			mptsas_log(mpt, CE_NOTE,
7105 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7106 			    "for enclosure with handle 0x%x is now "
7107 			    "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7108 			    enchandle,
7109 			    status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7110 			    ? ", inactive" : ", active",
7111 			    status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7112 			    ? ", out of sync" : "",
7113 			    status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7114 			    ? ", quiesced" : "",
7115 			    status &
7116 			    MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7117 			    ? ", write cache enabled" : "",
7118 			    status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7119 			    ? ", capacity expansion target" : "");
7120 			break;
7121 
7122 		case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7123 			mptsas_log(mpt, CE_NOTE,
7124 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7125 			    "for enclosure with handle 0x%x is now %s\n",
7126 			    physdisknum, devhandle, slot, enchandle,
7127 			    state == MPI2_RAID_PD_STATE_OPTIMAL
7128 			    ? "optimal" :
7129 			    state == MPI2_RAID_PD_STATE_REBUILDING
7130 			    ? "rebuilding" :
7131 			    state == MPI2_RAID_PD_STATE_DEGRADED
7132 			    ? "degraded" :
7133 			    state == MPI2_RAID_PD_STATE_HOT_SPARE
7134 			    ? "a hot spare" :
7135 			    state == MPI2_RAID_PD_STATE_ONLINE
7136 			    ? "online" :
7137 			    state == MPI2_RAID_PD_STATE_OFFLINE
7138 			    ? "offline" :
7139 			    state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7140 			    ? "not compatible" :
7141 			    state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7142 			    ? "not configured" :
7143 			    "state unknown");
7144 			break;
7145 		}
7146 		break;
7147 	}
7148 	default:
7149 		mptsas_log(mpt, CE_NOTE, "mptsas%d: unknown event %x received",
7150 		    mpt->m_instance, event);
7151 		break;
7152 	}
7153 
7154 	/*
7155 	 * Return the reply frame to the free queue.
7156 	 */
7157 	reply_index = mpt->m_free_index;
7158 	ddi_put32(mpt->m_acc_free_queue_hdl,
7159 	    &((uint32_t *)(void *)mpt->m_free_queue)[reply_index], rfm);
7160 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7161 	    DDI_DMA_SYNC_FORDEV);
7162 	if (++reply_index == mpt->m_free_queue_depth) {
7163 		reply_index = 0;
7164 	}
7165 	mpt->m_free_index = reply_index;
7166 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, reply_index);
7167 	mutex_exit(&mpt->m_mutex);
7168 }
7169 
7170 /*
7171  * invoked from timeout() to restart qfull cmds with throttle == 0
7172  */
7173 static void
7174 mptsas_restart_cmd(void *arg)
7175 {
7176 	mptsas_t	*mpt = arg;
7177 	mptsas_target_t	*ptgt = NULL;
7178 
7179 	mutex_enter(&mpt->m_mutex);
7180 
7181 	mpt->m_restart_cmd_timeid = 0;
7182 
7183 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7184 	    MPTSAS_HASH_FIRST);
7185 	while (ptgt != NULL) {
7186 		if (ptgt->m_reset_delay == 0) {
7187 			if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7188 				mptsas_set_throttle(mpt, ptgt,
7189 				    MAX_THROTTLE);
7190 			}
7191 		}
7192 
7193 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7194 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7195 	}
7196 	mptsas_restart_hba(mpt);
7197 	mutex_exit(&mpt->m_mutex);
7198 }
7199 
/*
 * Remove a completed command from the active slot table and update the
 * per-target bookkeeping: outstanding command count, throttle, and the
 * tag-queue timeout base used by the watchdog.
 */
void
mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		slot;
	mptsas_slots_t	*slots = mpt->m_active;
	int		t;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	/*
	 * NOTE(review): cmd was already dereferenced above (cmd_tgt_addr),
	 * so this ASSERT can never catch a NULL cmd in practice.
	 */
	ASSERT(cmd != NULL);
	ASSERT(cmd->cmd_queued == FALSE);

	/*
	 * Task Management cmds are removed in their own routines.  Also,
	 * we don't want to modify timeout based on TM cmds.
	 */
	if (cmd->cmd_flags & CFLAG_TM_CMD) {
		return;
	}

	t = Tgt(cmd);
	slot = cmd->cmd_slot;

	/*
	 * remove the cmd.
	 */
	if (cmd == slots->m_slot[slot]) {
		NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
		slots->m_slot[slot] = NULL;
		mpt->m_ncmds--;

		/*
		 * only decrement per target ncmds if command
		 * has a target associated with it.
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
			ptgt->m_t_ncmds--;
			/*
			 * reset throttle if we just ran an untagged command
			 * to a tagged target
			 */
			if ((ptgt->m_t_ncmds == 0) &&
			    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
		}

	}

	/*
	 * This is all we need to do for ioc commands.
	 * NOTE(review): ptgt is only dereferenced below for non-IOC cmds;
	 * presumably every non-IOC cmd has a valid cmd_tgt_addr — confirm.
	 */
	if (cmd->cmd_flags & CFLAG_CMDIOC) {
		mptsas_return_to_pool(mpt, cmd);
		return;
	}

	/*
	 * Figure out what to set tag Q timeout for...
	 *
	 * Optimize: If we have duplicate's of same timeout
	 * we're using, then we'll use it again until we run
	 * out of duplicates.  This should be the normal case
	 * for block and raw I/O.
	 * If no duplicates, we have to scan through tag que and
	 * find the longest timeout value and use it.  This is
	 * going to take a while...
	 * Add 1 to m_n_slots to account for TM request.
	 */
	if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
		if (--(ptgt->m_dups) == 0) {
			if (ptgt->m_t_ncmds) {
				mptsas_cmd_t *ssp;
				uint_t n = 0;
				ushort_t nslots = (slots->m_n_slots + 1);
				ushort_t i;
				/*
				 * This crude check assumes we don't do
				 * this too often which seems reasonable
				 * for block and raw I/O.
				 */
				for (i = 0; i < nslots; i++) {
					ssp = slots->m_slot[i];
					if (ssp && (Tgt(ssp) == t) &&
					    (ssp->cmd_pkt->pkt_time > n)) {
						/* new longest timeout found */
						n = ssp->cmd_pkt->pkt_time;
						ptgt->m_dups = 1;
					} else if (ssp && (Tgt(ssp) == t) &&
					    (ssp->cmd_pkt->pkt_time == n)) {
						ptgt->m_dups++;
					}
				}
				ptgt->m_timebase = n;
			} else {
				/* no cmds outstanding; reset the base */
				ptgt->m_dups = 0;
				ptgt->m_timebase = 0;
			}
		}
	}
	ptgt->m_timeout = ptgt->m_timebase;

	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
}
7302 
7303 /*
7304  * accept all cmds on the tx_waitq if any and then
7305  * start a fresh request from the top of the device queue.
7306  *
7307  * since there are always cmds queued on the tx_waitq, and rare cmds on
7308  * the instance waitq, so this function should not be invoked in the ISR,
7309  * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
7310  * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
7311  */
7312 static void
7313 mptsas_restart_hba(mptsas_t *mpt)
7314 {
7315 	ASSERT(mutex_owned(&mpt->m_mutex));
7316 
7317 	mutex_enter(&mpt->m_tx_waitq_mutex);
7318 	if (mpt->m_tx_waitq) {
7319 		mptsas_accept_tx_waitq(mpt);
7320 	}
7321 	mutex_exit(&mpt->m_tx_waitq_mutex);
7322 	mptsas_restart_waitq(mpt);
7323 }
7324 
7325 /*
7326  * start a fresh request from the top of the device queue
7327  */
static void
mptsas_restart_waitq(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd, *next_cmd;
	mptsas_target_t *ptgt = NULL;

	NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * If there is a reset delay, don't start any cmds.  Otherwise, start
	 * as many cmds as possible.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	cmd = mpt->m_waitq;

	while (cmd != NULL) {
		/* Capture successor now; cmd may be unlinked below. */
		next_cmd = cmd->cmd_linkp;
		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * passthru command get slot need
				 * set CFLAG_PREPARED.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_passthru(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		if (cmd->cmd_flags & CFLAG_CONFIG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the config page request and delete it
				 * from the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_config_page_access(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}

		/*
		 * Ordinary SCSI cmd: a fully drained target may have its
		 * throttle restored before the dispatch check below.
		 */
		ptgt = cmd->cmd_tgt_addr;
		if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
		    (ptgt->m_t_ncmds == 0)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		}
		/*
		 * NOTE(review): the second "ptgt &&" test below is redundant;
		 * the first conjunct already guarantees ptgt is non-NULL.
		 */
		if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
		    (ptgt && (ptgt->m_reset_delay == 0)) &&
		    (ptgt && (ptgt->m_t_ncmds <
		    ptgt->m_t_throttle))) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				mptsas_waitq_delete(mpt, cmd);
				(void) mptsas_start_cmd(mpt, cmd);
			}
		}
		cmd = next_cmd;
	}
}
7392 /*
7393  * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
7394  * Accept all those queued cmds before new cmd is accept so that the
7395  * cmds are sent in order.
7396  */
7397 static void
7398 mptsas_accept_tx_waitq(mptsas_t *mpt)
7399 {
7400 	mptsas_cmd_t *cmd;
7401 
7402 	ASSERT(mutex_owned(&mpt->m_mutex));
7403 	ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
7404 
7405 	/*
7406 	 * A Bus Reset could occur at any time and flush the tx_waitq,
7407 	 * so we cannot count on the tx_waitq to contain even one cmd.
7408 	 * And when the m_tx_waitq_mutex is released and run
7409 	 * mptsas_accept_pkt(), the tx_waitq may be flushed.
7410 	 */
7411 	cmd = mpt->m_tx_waitq;
7412 	for (;;) {
7413 		if ((cmd = mpt->m_tx_waitq) == NULL) {
7414 			mpt->m_tx_draining = 0;
7415 			break;
7416 		}
7417 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7418 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
7419 		}
7420 		cmd->cmd_linkp = NULL;
7421 		mutex_exit(&mpt->m_tx_waitq_mutex);
7422 		if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7423 			cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7424 			    "to accept cmd on queue\n");
7425 		mutex_enter(&mpt->m_tx_waitq_mutex);
7426 	}
7427 }
7428 
7429 
7430 /*
7431  * mpt tag type lookup
7432  */
7433 static char mptsas_tag_lookup[] =
7434 	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
7435 
/*
 * Build an MPI2 SCSI IO request frame for cmd in the slot it already
 * owns, post the request descriptor to the chip, and start the timeout
 * bookkeeping for the target.  Returns DDI_SUCCESS, or DDI_FAILURE if
 * the cmd was deferred (untagged drain) or an FMA handle check failed.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	int			n;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;

	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));

	/*
	 * The SMID is the command's pre-assigned slot number (0 is an
	 * invalid SMID, so slots start at 1 — presumably assigned by
	 * mptsas_save_cmd(); confirm).  NOTE(review): the old comment
	 * about incrementing an index here was stale.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* Put the cmd back at the head of the waitq. */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/* Untagged cmd: serialize the target (except RQSENSE). */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
				ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	/* Locate this slot's request frame and build the IO request. */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	/* Copy the CDB into the frame. */
	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data transfer: emit a zero-length simple SGE. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=0x%p, with cmd=0x%p",
	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));

	/* Flush the frame to device-visible memory before posting. */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	request_desc_high = ptgt->m_devhdl << 16;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

	/*
	 * Start timeout.
	 */
#ifdef MPTSAS_TEST
	/*
	 * Temporarily set timebase = 0;  needed for
	 * timeout torture test.
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 0;
	}
#endif
	/* Update the target's duplicate-timeout tracking (see remove_cmd). */
	n = pkt->pkt_time - ptgt->m_timebase;

	if (n == 0) {
		(ptgt->m_dups)++;
		ptgt->m_timeout = ptgt->m_timebase;
	} else if (n > 0) {
		ptgt->m_timeout =
		    ptgt->m_timebase = pkt->pkt_time;
		ptgt->m_dups = 1;
	} else if (n < 0) {
		ptgt->m_timeout = ptgt->m_timebase;
	}
#ifdef MPTSAS_TEST
	/*
	 * Set back to a number higher than
	 * mptsas_scsi_watchdog_tick
	 * so timeouts will happen in mptsas_watchsubr
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 60;
	}
#endif

	/* FMA: verify the handles used to build and post the frame. */
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7613 
7614 /*
7615  * Select a helper thread to handle current doneq
7616  */
7617 static void
7618 mptsas_deliver_doneq_thread(mptsas_t *mpt)
7619 {
7620 	uint64_t			t, i;
7621 	uint32_t			min = 0xffffffff;
7622 	mptsas_doneq_thread_list_t	*item;
7623 
7624 	for (i = 0; i < mpt->m_doneq_thread_n; i++) {
7625 		item = &mpt->m_doneq_thread_id[i];
7626 		/*
7627 		 * If the completed command on help thread[i] less than
7628 		 * doneq_thread_threshold, then pick the thread[i]. Otherwise
7629 		 * pick a thread which has least completed command.
7630 		 */
7631 
7632 		mutex_enter(&item->mutex);
7633 		if (item->len < mpt->m_doneq_thread_threshold) {
7634 			t = i;
7635 			mutex_exit(&item->mutex);
7636 			break;
7637 		}
7638 		if (item->len < min) {
7639 			min = item->len;
7640 			t = i;
7641 		}
7642 		mutex_exit(&item->mutex);
7643 	}
7644 	mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
7645 	mptsas_doneq_mv(mpt, t);
7646 	cv_signal(&mpt->m_doneq_thread_id[t].cv);
7647 	mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
7648 }
7649 
7650 /*
7651  * move the current global doneq to the doneq of thead[t]
7652  */
7653 static void
7654 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
7655 {
7656 	mptsas_cmd_t			*cmd;
7657 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
7658 
7659 	ASSERT(mutex_owned(&item->mutex));
7660 	while ((cmd = mpt->m_doneq) != NULL) {
7661 		if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
7662 			mpt->m_donetail = &mpt->m_doneq;
7663 		}
7664 		cmd->cmd_linkp = NULL;
7665 		*item->donetail = cmd;
7666 		item->donetail = &cmd->cmd_linkp;
7667 		mpt->m_doneq_len--;
7668 		item->len++;
7669 	}
7670 }
7671 
7672 void
7673 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
7674 {
7675 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
7676 
7677 	/* Check all acc and dma handles */
7678 	if ((mptsas_check_acc_handle(mpt->m_datap) !=
7679 	    DDI_SUCCESS) ||
7680 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
7681 	    DDI_SUCCESS) ||
7682 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
7683 	    DDI_SUCCESS) ||
7684 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
7685 	    DDI_SUCCESS) ||
7686 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
7687 	    DDI_SUCCESS) ||
7688 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
7689 	    DDI_SUCCESS) ||
7690 	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
7691 	    DDI_SUCCESS)) {
7692 		ddi_fm_service_impact(mpt->m_dip,
7693 		    DDI_SERVICE_UNAFFECTED);
7694 		ddi_fm_acc_err_clear(mpt->m_config_handle,
7695 		    DDI_FME_VER0);
7696 		pkt->pkt_reason = CMD_TRAN_ERR;
7697 		pkt->pkt_statistics = 0;
7698 	}
7699 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
7700 	    DDI_SUCCESS) ||
7701 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
7702 	    DDI_SUCCESS) ||
7703 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
7704 	    DDI_SUCCESS) ||
7705 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
7706 	    DDI_SUCCESS) ||
7707 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
7708 	    DDI_SUCCESS)) {
7709 		ddi_fm_service_impact(mpt->m_dip,
7710 		    DDI_SERVICE_UNAFFECTED);
7711 		pkt->pkt_reason = CMD_TRAN_ERR;
7712 		pkt->pkt_statistics = 0;
7713 	}
7714 	if (cmd->cmd_dmahandle &&
7715 	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
7716 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
7717 		pkt->pkt_reason = CMD_TRAN_ERR;
7718 		pkt->pkt_statistics = 0;
7719 	}
7720 	if ((cmd->cmd_extra_frames &&
7721 	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
7722 	    DDI_SUCCESS) ||
7723 	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
7724 	    DDI_SUCCESS)))) {
7725 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
7726 		pkt->pkt_reason = CMD_TRAN_ERR;
7727 		pkt->pkt_statistics = 0;
7728 	}
7729 	if (cmd->cmd_arqhandle &&
7730 	    (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
7731 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
7732 		pkt->pkt_reason = CMD_TRAN_ERR;
7733 		pkt->pkt_statistics = 0;
7734 	}
7735 	if (cmd->cmd_ext_arqhandle &&
7736 	    (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
7737 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
7738 		pkt->pkt_reason = CMD_TRAN_ERR;
7739 		pkt->pkt_statistics = 0;
7740 	}
7741 }
7742 
7743 /*
7744  * These routines manipulate the queue of commands that
7745  * are waiting for their completion routines to be called.
7746  * The queue is usually in FIFO order but on an MP system
7747  * it's possible for the completion routines to get out
7748  * of order. If that's a problem you need to add a global
7749  * mutex around the code that calls the completion routine
7750  * in the interrupt handler.
7751  */
7752 static void
7753 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
7754 {
7755 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
7756 
7757 	NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
7758 
7759 	ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
7760 	cmd->cmd_linkp = NULL;
7761 	cmd->cmd_flags |= CFLAG_FINISHED;
7762 	cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
7763 
7764 	mptsas_fma_check(mpt, cmd);
7765 
7766 	/*
7767 	 * only add scsi pkts that have completion routines to
7768 	 * the doneq.  no intr cmds do not have callbacks.
7769 	 * run the callback on an ARQ pkt immediately.  This
7770 	 * frees the ARQ for other check conditions.
7771 	 */
7772 	if (pkt->pkt_comp && !(cmd->cmd_flags & CFLAG_CMDARQ)) {
7773 		*mpt->m_donetail = cmd;
7774 		mpt->m_donetail = &cmd->cmd_linkp;
7775 		mpt->m_doneq_len++;
7776 	} else if (pkt->pkt_comp && (cmd->cmd_flags & CFLAG_CMDARQ)) {
7777 		cmd->cmd_flags |= CFLAG_COMPLETED;
7778 		mutex_exit(&mpt->m_mutex);
7779 		mptsas_pkt_comp(pkt, cmd);
7780 		mutex_enter(&mpt->m_mutex);
7781 	}
7782 }
7783 
7784 static mptsas_cmd_t *
7785 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
7786 {
7787 	mptsas_cmd_t			*cmd;
7788 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
7789 
7790 	/* pop one off the done queue */
7791 	if ((cmd = item->doneq) != NULL) {
7792 		/* if the queue is now empty fix the tail pointer */
7793 		NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
7794 		if ((item->doneq = cmd->cmd_linkp) == NULL) {
7795 			item->donetail = &item->doneq;
7796 		}
7797 		cmd->cmd_linkp = NULL;
7798 		item->len--;
7799 	}
7800 	return (cmd);
7801 }
7802 
/*
 * Drain the HBA done queue: detach the entire list while holding m_mutex,
 * then run each command's completion routine with the mutex dropped.  The
 * m_in_callback flag prevents recursive draining if a completion routine
 * re-enters the driver.
 */
static void
mptsas_doneq_empty(mptsas_t *mpt)
{
	if (mpt->m_doneq && !mpt->m_in_callback) {
		mptsas_cmd_t	*cmd, *next;
		struct scsi_pkt *pkt;

		mpt->m_in_callback = 1;
		/* detach the list and reset the queue to empty */
		cmd = mpt->m_doneq;
		mpt->m_doneq = NULL;
		mpt->m_donetail = &mpt->m_doneq;
		mpt->m_doneq_len = 0;

		mutex_exit(&mpt->m_mutex);
		/*
		 * run the completion routines of all the
		 * completed commands
		 */
		while (cmd != NULL) {
			next = cmd->cmd_linkp;
			cmd->cmd_linkp = NULL;
			/* run this command's completion routine */
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
			mptsas_pkt_comp(pkt, cmd);
			cmd = next;
		}
		mutex_enter(&mpt->m_mutex);
		mpt->m_in_callback = 0;
	}
}
7834 
7835 /*
7836  * These routines manipulate the target's queue of pending requests
7837  */
7838 void
7839 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
7840 {
7841 	NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
7842 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7843 	cmd->cmd_queued = TRUE;
7844 	if (ptgt)
7845 		ptgt->m_t_nwait++;
7846 	if (cmd->cmd_pkt_flags & FLAG_HEAD) {
7847 		if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
7848 			mpt->m_waitqtail = &cmd->cmd_linkp;
7849 		}
7850 		mpt->m_waitq = cmd;
7851 	} else {
7852 		cmd->cmd_linkp = NULL;
7853 		*(mpt->m_waitqtail) = cmd;
7854 		mpt->m_waitqtail = &cmd->cmd_linkp;
7855 	}
7856 }
7857 
/*
 * Remove and return the command at the head of the HBA wait queue, or
 * NULL if the queue is empty (the NULL case is handled by the check
 * below).  Decrements the owning target's count of waiting commands.
 */
static mptsas_cmd_t *
mptsas_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd;
	mptsas_target_t *ptgt;
	NDBG7(("mptsas_waitq_rm"));

	/* dequeue the head command into `cmd' (may be NULL) */
	MPTSAS_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
	if (cmd) {
		ptgt = cmd->cmd_tgt_addr;
		if (ptgt) {
			ptgt->m_t_nwait--;
			ASSERT(ptgt->m_t_nwait >= 0);
		}
	}
	return (cmd);
}
7877 
7878 /*
7879  * remove specified cmd from the middle of the wait queue.
7880  */
7881 static void
7882 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
7883 {
7884 	mptsas_cmd_t	*prevp = mpt->m_waitq;
7885 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7886 
7887 	NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
7888 	    (void *)mpt, (void *)cmd));
7889 	if (ptgt) {
7890 		ptgt->m_t_nwait--;
7891 		ASSERT(ptgt->m_t_nwait >= 0);
7892 	}
7893 
7894 	if (prevp == cmd) {
7895 		if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
7896 			mpt->m_waitqtail = &mpt->m_waitq;
7897 
7898 		cmd->cmd_linkp = NULL;
7899 		cmd->cmd_queued = FALSE;
7900 		NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
7901 		    (void *)mpt, (void *)cmd));
7902 		return;
7903 	}
7904 
7905 	while (prevp != NULL) {
7906 		if (prevp->cmd_linkp == cmd) {
7907 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
7908 				mpt->m_waitqtail = &prevp->cmd_linkp;
7909 
7910 			cmd->cmd_linkp = NULL;
7911 			cmd->cmd_queued = FALSE;
7912 			NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
7913 			    (void *)mpt, (void *)cmd));
7914 			return;
7915 		}
7916 		prevp = prevp->cmd_linkp;
7917 	}
7918 	cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
7919 }
7920 
/*
 * Remove and return the command at the head of the tx wait queue, or
 * NULL if that queue is empty.  Unlike mptsas_waitq_rm(), no per-target
 * accounting is done here.
 */
static mptsas_cmd_t *
mptsas_tx_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t *cmd;
	NDBG7(("mptsas_tx_waitq_rm"));

	/* dequeue the head command into `cmd' (may be NULL) */
	MPTSAS_TX_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));

	return (cmd);
}
7933 
7934 /*
7935  * remove specified cmd from the middle of the tx_waitq.
7936  */
7937 static void
7938 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
7939 {
7940 	mptsas_cmd_t *prevp = mpt->m_tx_waitq;
7941 
7942 	NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
7943 	    (void *)mpt, (void *)cmd));
7944 
7945 	if (prevp == cmd) {
7946 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
7947 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
7948 
7949 		cmd->cmd_linkp = NULL;
7950 		cmd->cmd_queued = FALSE;
7951 		NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
7952 		    (void *)mpt, (void *)cmd));
7953 		return;
7954 	}
7955 
7956 	while (prevp != NULL) {
7957 		if (prevp->cmd_linkp == cmd) {
7958 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
7959 				mpt->m_tx_waitqtail = &prevp->cmd_linkp;
7960 
7961 			cmd->cmd_linkp = NULL;
7962 			cmd->cmd_queued = FALSE;
7963 			NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
7964 			    (void *)mpt, (void *)cmd));
7965 			return;
7966 		}
7967 		prevp = prevp->cmd_linkp;
7968 	}
7969 	cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
7970 }
7971 
7972 /*
7973  * device and bus reset handling
7974  *
7975  * Notes:
7976  *	- RESET_ALL:	reset the controller
7977  *	- RESET_TARGET:	reset the target specified in scsi_address
7978  */
/*
 * tran_reset(9E) entry point: reset the target addressed by `ap'.
 * Returns TRUE on success, FALSE on failure.
 *
 * NOTE(review): `level' (RESET_ALL vs. RESET_TARGET) is only used in the
 * debug message here; in both cases a target-level reset is issued via
 * mptsas_do_scsi_reset() -- confirm this is the intended RESET_ALL
 * behavior.
 */
static int
mptsas_scsi_reset(struct scsi_address *ap, int level)
{
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	mptsas_target_t		*ptgt = NULL;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
	ptgt = tgt_private->t_private;
	if (ptgt == NULL) {
		return (FALSE);
	}
	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
	    level));

	mutex_enter(&mpt->m_mutex);
	/*
	 * if we are not in panic set up a reset delay for this target
	 */
	if (!ddi_in_panic()) {
		mptsas_setup_bus_reset_delay(mpt);
	} else {
		/* cannot use timeouts at panic time; just busy-wait */
		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
	}
	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
	mutex_exit(&mpt->m_mutex);

	/*
	 * The transport layer expect to only see TRUE and
	 * FALSE. Therefore, we will adjust the return value
	 * if mptsas_do_scsi_reset returns FAILED.
	 */
	if (rval == FAILED)
		rval = FALSE;
	return (rval);
}
8016 
/*
 * Issue a task-management Target Reset to the device handle `devhdl'.
 * Physical disks that are members of a RAID configuration are not reset;
 * for those this function returns TRUE without doing anything.  Any
 * commands completed by the reset are drained from the done queue before
 * returning.  Caller must hold m_mutex.
 */
static int
mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
{
	int		rval = FALSE;
	uint8_t		config, disk;
	mptsas_slots_t	*slots = mpt->m_active;

	ASSERT(mutex_owned(&mpt->m_mutex));

	if (mptsas_debug_resets) {
		mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
		    devhdl);
	}

	/*
	 * Issue a Target Reset message to the target specified but not to a
	 * disk making up a raid volume.  Just look through the RAID config
	 * Phys Disk list of DevHandles.  If the target's DevHandle is in this
	 * list, then don't reset this target.
	 */
	for (config = 0; config < slots->m_num_raid_configs; config++) {
		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
			if (devhdl == slots->m_raidconfig[config].
			    m_physdisk_devhdl[disk]) {
				return (TRUE);
			}
		}
	}

	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0);

	/* complete any commands flushed back by the reset */
	mptsas_doneq_empty(mpt);
	return (rval);
}
8052 
/*
 * tran_reset_notify(9E) entry point: register or cancel a client callback
 * to be invoked on bus reset.  All bookkeeping is delegated to
 * scsi_hba_reset_notify_setup(9F) using this instance's mutex and list.
 */
static int
mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
	void (*callback)(caddr_t), caddr_t arg)
{
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));

	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
}
8064 
8065 static int
8066 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8067 {
8068 	dev_info_t	*lun_dip = NULL;
8069 
8070 	ASSERT(sd != NULL);
8071 	ASSERT(name != NULL);
8072 	lun_dip = sd->sd_dev;
8073 	ASSERT(lun_dip != NULL);
8074 
8075 	if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8076 		return (1);
8077 	} else {
8078 		return (0);
8079 	}
8080 }
8081 
/*
 * tran_get_bus_addr-style helper: the bus address is identical to the
 * device name, so simply delegate to mptsas_get_name().
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
8087 
/*
 * Set a target's command throttle.  HOLD_THROTTLE always takes effect;
 * any other value is applied only when the target is not currently in a
 * reset delay.  This is a no-op while the bus is quiesced or draining.
 */
void
mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
{

	NDBG25(("mptsas_set_throttle: throttle=%x", what));

	/*
	 * if the bus is draining/quiesced, no changes to the throttles
	 * are allowed. Not allowing change of throttles during draining
	 * limits error recovery but will reduce draining time
	 *
	 * all throttles should have been set to HOLD_THROTTLE
	 */
	if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
		return;
	}

	if (what == HOLD_THROTTLE) {
		ptgt->m_t_throttle = HOLD_THROTTLE;
	} else if (ptgt->m_reset_delay == 0) {
		ptgt->m_t_throttle = what;
	}
}
8111 
8112 /*
8113  * Clean up from a device reset.
8114  * For the case of target reset, this function clears the waitq of all
8115  * commands for a particular target.   For the case of abort task set, this
8116  * function clears the waitq of all commonds for a particular target/lun.
8117  */
/*
 * Fail all commands pending for a target (target reset) or a target/lun
 * (abort task set / LU reset): first any commands still occupying active
 * slots, then matching commands on the wait queue and the tx wait queue.
 * Completed commands get CMD_RESET/STAT_DEV_RESET, except for the abort
 * task set case which uses CMD_ABORTED/STAT_ABORTED.
 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			/* target reset: match on target only */
			if (Tgt(cmd) == target) {
				mptsas_log(mpt, CE_NOTE, "mptsas_flush_target "
				    "discovered non-NULL cmd in slot %d, "
				    "tasktype 0x%x", slot, tasktype);
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			/* abort task set uses aborted status, then shares */
			/* the target/lun matching of the LU reset case */
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				mptsas_log(mpt, CE_NOTE, "mptsas_flush_target "
				    "discovered non-NULL cmd in slot %d, "
				    "tasktype 0x%x", slot, tasktype);
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Flush the waitq and tx_waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_tx_waitq_delete(mpt, cmd);
				/* drop the tx_waitq mutex while completing */
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat =  STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_tx_waitq_delete(mpt, cmd);
				/* drop the tx_waitq mutex while completing */
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
8247 
8248 /*
8249  * Clean up hba state, abort all outstanding command and commands in waitq
8250  * reset timeout of all targets.
8251  */
/*
 * Fail every outstanding non-IOC command after a controller reset: first
 * anything still in an active slot, then the wait queue and tx wait queue.
 * All are completed with CMD_RESET/STAT_BUS_RESET.  Passthrough and config
 * commands are woken via their condition variables rather than queued.
 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd;
	int		slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame.  Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;

		/* internal IOC commands are handled elsewhere */
		if (cmd->cmd_flags & CFLAG_CMDIOC)
			continue;

		mptsas_log(mpt, CE_NOTE, "mptsas_flush_hba discovered non-NULL "
		    "cmd in slot %d", slot);
		mptsas_dump_cmd(mpt, cmd);

		mptsas_remove_cmd(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
	}

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG)) {
			/* waiters poll CFLAG_FINISHED after the broadcast */
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}

	/*
	 * Flush the tx_waitq
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
		/* drop the tx_waitq mutex while completing the command */
		mutex_exit(&mpt->m_tx_waitq_mutex);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
		mutex_enter(&mpt->m_tx_waitq_mutex);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);
}
8310 
8311 /*
8312  * set pkt_reason and OR in pkt_statistics flag
8313  */
8314 static void
8315 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8316     uint_t stat)
8317 {
8318 #ifndef __lock_lint
8319 	_NOTE(ARGUNUSED(mpt))
8320 #endif
8321 
8322 	NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8323 	    (void *)cmd, reason, stat));
8324 
8325 	if (cmd) {
8326 		if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8327 			cmd->cmd_pkt->pkt_reason = reason;
8328 		}
8329 		cmd->cmd_pkt->pkt_statistics |= stat;
8330 	}
8331 }
8332 
/*
 * Arm the global reset-delay watchdog timeout, unless it is already
 * pending or driver timeouts are disabled.  Protected by the global
 * mutex since mptsas_reset_watch is shared across instances.
 */
static void
mptsas_start_watch_reset_delay()
{
	NDBG22(("mptsas_start_watch_reset_delay"));

	mutex_enter(&mptsas_global_mutex);
	if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
		mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
		    drv_usectohz((clock_t)
		    MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
		ASSERT(mptsas_reset_watch != NULL);
	}
	mutex_exit(&mptsas_global_mutex);
}
8347 
/*
 * Hold the throttle of every target on this HBA and start its reset-delay
 * countdown, then make sure the reset-delay watchdog is running so the
 * throttles are eventually restored.
 */
static void
mptsas_setup_bus_reset_delay(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_setup_bus_reset_delay"));
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
		ptgt->m_reset_delay = mpt->m_scsi_reset_delay;

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	mptsas_start_watch_reset_delay();
}
8366 
8367 /*
8368  * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8369  * mpt instance for active reset delays
8370  */
/*
 * timeout(9F) handler for the reset-delay watchdog: clear the pending
 * timeout id, walk every attached mpt instance decrementing its targets'
 * reset delays, and re-arm the watchdog if any delay is still pending.
 */
static void
mptsas_watch_reset_delay(void *arg)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(arg))
#endif

	mptsas_t	*mpt;
	int		not_done = 0;

	NDBG22(("mptsas_watch_reset_delay"));

	/* this timeout has fired; allow a new one to be scheduled */
	mutex_enter(&mptsas_global_mutex);
	mptsas_reset_watch = 0;
	mutex_exit(&mptsas_global_mutex);
	rw_enter(&mptsas_global_rwlock, RW_READER);
	for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
		/* skip instances that are not (yet) attached */
		if (mpt->m_tran == 0) {
			continue;
		}
		mutex_enter(&mpt->m_mutex);
		not_done += mptsas_watch_reset_delay_subr(mpt);
		mutex_exit(&mpt->m_mutex);
	}
	rw_exit(&mptsas_global_rwlock);

	if (not_done) {
		mptsas_start_watch_reset_delay();
	}
}
8401 
/*
 * Decrement the reset delay of every target on `mpt'.  Targets whose
 * delay expires get their throttle restored to MAX_THROTTLE and the HBA
 * is restarted to resume queued I/O.  Returns -1 if any target still has
 * a pending delay (so the caller re-arms the watchdog), else 0.
 * Caller must hold m_mutex.
 */
static int
mptsas_watch_reset_delay_subr(mptsas_t *mpt)
{
	int		done = 0;
	int		restart = 0;
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		if (ptgt->m_reset_delay != 0) {
			ptgt->m_reset_delay -=
			    MPTSAS_WATCH_RESET_DELAY_TICK;
			if (ptgt->m_reset_delay <= 0) {
				/* delay expired; resume this target */
				ptgt->m_reset_delay = 0;
				mptsas_set_throttle(mpt, ptgt,
				    MAX_THROTTLE);
				restart++;
			} else {
				done = -1;
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	if (restart > 0) {
		mptsas_restart_hba(mpt);
	}
	return (done);
}
8438 
8439 #ifdef MPTSAS_TEST
8440 static void
8441 mptsas_test_reset(mptsas_t *mpt, int target)
8442 {
8443 	mptsas_target_t    *ptgt = NULL;
8444 
8445 	if (mptsas_rtest == target) {
8446 		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
8447 			mptsas_rtest = -1;
8448 		}
8449 		if (mptsas_rtest == -1) {
8450 			NDBG22(("mptsas_test_reset success"));
8451 		}
8452 	}
8453 }
8454 #endif
8455 
8456 /*
8457  * abort handling:
8458  *
8459  * Notes:
8460  *	- if pkt is not NULL, abort just that command
8461  *	- if pkt is NULL, abort all outstanding commands for target
8462  */
/*
 * tran_abort(9E) entry point.  If `pkt' is non-NULL, abort just that
 * command; otherwise abort all outstanding commands for the addressed
 * target/lun.  Returns TRUE/FALSE.
 */
static int
mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			target, lun;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	target = tgt_private->t_private->m_devhdl;
	lun = tgt_private->t_lun;

	NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));

	mutex_enter(&mpt->m_mutex);
	rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
8484 
/*
 * Do the work for tran_abort: abort a single packet (if `pkt' != NULL)
 * or the whole task set for target/lun.  A packet still sitting on the
 * wait queue is completed locally with CMD_ABORTED; a packet already in
 * an active slot is aborted through firmware task management.  Returns
 * TRUE/FALSE.  Caller must hold m_mutex.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/* still on the wait queue: complete it locally */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* complete anything flushed back by the abort */
	mptsas_doneq_empty(mpt);
	return (rval);
}
8564 
8565 /*
8566  * capability handling:
8567  * (*tran_getcap).  Get the capability named, and return its value.
8568  */
8569 static int
8570 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
8571 {
8572 	mptsas_t	*mpt = ADDR2MPT(ap);
8573 	int		ckey;
8574 	int		rval = FALSE;
8575 
8576 	NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
8577 	    ap->a_target, cap, tgtonly));
8578 
8579 	mutex_enter(&mpt->m_mutex);
8580 
8581 	if ((mptsas_capchk(cap, tgtonly, &ckey)) != TRUE) {
8582 		mutex_exit(&mpt->m_mutex);
8583 		return (UNDEFINED);
8584 	}
8585 
8586 	switch (ckey) {
8587 	case SCSI_CAP_DMA_MAX:
8588 		rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
8589 		break;
8590 	case SCSI_CAP_ARQ:
8591 		rval = TRUE;
8592 		break;
8593 	case SCSI_CAP_MSG_OUT:
8594 	case SCSI_CAP_PARITY:
8595 	case SCSI_CAP_UNTAGGED_QING:
8596 		rval = TRUE;
8597 		break;
8598 	case SCSI_CAP_TAGGED_QING:
8599 		rval = TRUE;
8600 		break;
8601 	case SCSI_CAP_RESET_NOTIFICATION:
8602 		rval = TRUE;
8603 		break;
8604 	case SCSI_CAP_LINKED_CMDS:
8605 		rval = FALSE;
8606 		break;
8607 	case SCSI_CAP_QFULL_RETRIES:
8608 		rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
8609 		    tran_tgt_private))->t_private->m_qfull_retries;
8610 		break;
8611 	case SCSI_CAP_QFULL_RETRY_INTERVAL:
8612 		rval = drv_hztousec(((mptsas_tgt_private_t *)
8613 		    (ap->a_hba_tran->tran_tgt_private))->
8614 		    t_private->m_qfull_retry_interval) / 1000;
8615 		break;
8616 	case SCSI_CAP_CDB_LEN:
8617 		rval = CDB_GROUP4;
8618 		break;
8619 	case SCSI_CAP_INTERCONNECT_TYPE:
8620 		rval = INTERCONNECT_SAS;
8621 		break;
8622 	default:
8623 		rval = UNDEFINED;
8624 		break;
8625 	}
8626 
8627 	NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
8628 
8629 	mutex_exit(&mpt->m_mutex);
8630 	return (rval);
8631 }
8632 
8633 /*
8634  * (*tran_setcap).  Set the capability named to the value given.
8635  */
/*
 * tran_setcap(9E) entry point: set the named capability to `value'.
 * Only per-target (tgtonly != 0) changes are accepted.  Returns TRUE if
 * the capability was set (or the value is already in effect), FALSE if
 * the capability is read-only here, and UNDEFINED for unknown strings.
 */
static int
mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
{
	mptsas_t	*mpt = ADDR2MPT(ap);
	int		ckey;
	int		rval = FALSE;

	NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
	    ap->a_target, cap, value, tgtonly));

	/* only per-target capability changes are supported */
	if (!tgtonly) {
		return (rval);
	}

	mutex_enter(&mpt->m_mutex);

	if ((mptsas_capchk(cap, tgtonly, &ckey)) != TRUE) {
		mutex_exit(&mpt->m_mutex);
		return (UNDEFINED);
	}

	switch (ckey) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_INITIATOR_ID:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_RESET_NOTIFICATION:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_ARQ:
		/*
		 * We cannot turn off arq so return false if asked to
		 */
		if (value) {
			rval = TRUE;
		} else {
			rval = FALSE;
		}
		break;
	case SCSI_CAP_TAGGED_QING:
		/* enabling tagged queuing restores full throttle */
		mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
		    (ap->a_hba_tran->tran_tgt_private))->t_private,
		    MAX_THROTTLE);
		rval = TRUE;
		break;
	case SCSI_CAP_QFULL_RETRIES:
		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
		    t_private->m_qfull_retries = (uchar_t)value;
		rval = TRUE;
		break;
	case SCSI_CAP_QFULL_RETRY_INTERVAL:
		/* value is in milliseconds; store as clock ticks */
		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
		    t_private->m_qfull_retry_interval =
		    drv_usectohz(value * 1000);
		rval = TRUE;
		break;
	default:
		rval = UNDEFINED;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
8704 
8705 /*
8706  * Utility routine for mptsas_ifsetcap/ifgetcap
8707  */
8708 /*ARGSUSED*/
8709 static int
8710 mptsas_capchk(char *cap, int tgtonly, int *cidxp)
8711 {
8712 	NDBG24(("mptsas_capchk: cap=%s", cap));
8713 
8714 	if (!cap)
8715 		return (FALSE);
8716 
8717 	*cidxp = scsi_hba_lookup_capstr(cap);
8718 	return (TRUE);
8719 }
8720 
/*
 * (Re)allocate the active-slot array for this instance.  `flag' is the
 * kmem allocation flag (e.g. KM_SLEEP/KM_NOSLEEP).  Returns 0 on success
 * and -1 if commands are outstanding or the allocation fails.  Any
 * previous array is freed after the new one is installed.
 */
static int
mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
{
	mptsas_slots_t	*old_active = mpt->m_active;
	mptsas_slots_t	*new_active;
	size_t		size;
	int		rval = -1;

	/* cannot resize while commands are in flight */
	if (mpt->m_ncmds) {
		NDBG9(("cannot change size of active slots array"));
		return (rval);
	}

	size = MPTSAS_SLOTS_SIZE(mpt);
	new_active = kmem_zalloc(size, flag);
	if (new_active == NULL) {
		NDBG1(("new active alloc failed"));
	} else {
		/*
		 * Since SMID 0 is reserved and the TM slot is reserved, the
		 * number of slots that can be used at any one time is
		 * m_max_requests - 2.
		 */
		mpt->m_active = new_active;
		mpt->m_active->m_n_slots = (mpt->m_max_requests - 2);
		mpt->m_active->m_size = size;
		mpt->m_active->m_tags = 1;
		if (old_active) {
			kmem_free(old_active, old_active->m_size);
		}
		rval = 0;
	}

	return (rval);
}
8756 
8757 /*
8758  * Error logging, printing, and debug print routines.
8759  */
8760 static char *mptsas_label = "mpt_sas";
8761 
/*PRINTFLIKE3*/
/*
 * Driver logging routine: format the message into the shared
 * mptsas_log_buf (serialized by mptsas_log_mutex) and emit it via
 * scsi_log(9F) against the instance's dip (or NULL when mpt is NULL).
 * CE_CONT messages get an explicit trailing newline.
 *
 * NOTE(review): vsprintf() does not bound the write to mptsas_log_buf;
 * a sufficiently long message could overflow the buffer -- consider
 * vsnprintf() (verify the buffer's declared size first).
 */
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t	*dev;
	va_list		ap;

	if (mpt) {
		dev = mpt->m_dip;
	} else {
		dev = 0;
	}

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	if (level == CE_CONT) {
		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
8789 
8790 #ifdef MPTSAS_DEBUG
/*PRINTFLIKE1*/
/*
 * Debug-build printf (MPTSAS_DEBUG only): format into the shared log
 * buffer and emit via prom_printf() when PROM_PRINTF is defined,
 * otherwise via scsi_log(9F) at SCSI_DEBUG level.
 */
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list		ap;

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
8811 #endif
8812 
8813 /*
8814  * timeout handling
8815  */
/*
 * Periodic timeout(9F) handler: run the per-instance watchdog
 * (mptsas_watchsubr) on every attached, powered-on instance, then
 * reschedule itself at mptsas_tick intervals while timeouts remain
 * enabled.
 */
static void
mptsas_watch(void *arg)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(arg))
#endif

	mptsas_t	*mpt;

	NDBG30(("mptsas_watch"));

	rw_enter(&mptsas_global_rwlock, RW_READER);
	for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {

		mutex_enter(&mpt->m_mutex);

		/* Skip device if not powered on */
		if (mpt->m_options & MPTSAS_OPT_PM) {
			if (mpt->m_power_level == PM_LEVEL_D0) {
				/* hold the device at full power meanwhile */
				(void) pm_busy_component(mpt->m_dip, 0);
				mpt->m_busy = 1;
			} else {
				mutex_exit(&mpt->m_mutex);
				continue;
			}
		}

		/*
		 * For now, always call mptsas_watchsubr.
		 */
		mptsas_watchsubr(mpt);

		if (mpt->m_options & MPTSAS_OPT_PM) {
			mpt->m_busy = 0;
			(void) pm_idle_component(mpt->m_dip, 0);
		}

		mutex_exit(&mpt->m_mutex);
	}
	rw_exit(&mptsas_global_rwlock);

	/* re-arm the watchdog unless timeouts have been disabled */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_timeouts_enabled)
		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
	mutex_exit(&mptsas_global_mutex);
}
8862 
/*
 * Per-instance watchdog work, called from mptsas_watch() with m_mutex
 * held: detect commands stuck in active slots (drain the target's
 * throttle), time out passthrough/config commands, restore throttles
 * after qfull draining, and fire command timeout recovery per target.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot.  Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				}
			}
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
				}
			}
		}
	}

	/* walk all targets for throttle and timeout bookkeeping */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			if (ptgt->m_timeout < 0) {
				/* target timed out; attempt recovery */
				mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
}
8961 
8962 /*
8963  * timeout recovery
8964  */
8965 static void
8966 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
8967 {
8968 
8969 	NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
8970 	mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
8971 	    "Target %d", devhdl);
8972 
8973 	/*
8974 	 * If the current target is not the target passed in,
8975 	 * try to reset that target.
8976 	 */
8977 	NDBG29(("mptsas_cmd_timeout: device reset"));
8978 	if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
8979 		mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
8980 		    "recovery failed!", devhdl);
8981 	}
8982 }
8983 
8984 /*
8985  * Device / Hotplug control
8986  */
8987 static int
8988 mptsas_scsi_quiesce(dev_info_t *dip)
8989 {
8990 	mptsas_t	*mpt;
8991 	scsi_hba_tran_t	*tran;
8992 
8993 	tran = ddi_get_driver_private(dip);
8994 	if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
8995 		return (-1);
8996 
8997 	return (mptsas_quiesce_bus(mpt));
8998 }
8999 
9000 static int
9001 mptsas_scsi_unquiesce(dev_info_t *dip)
9002 {
9003 	mptsas_t		*mpt;
9004 	scsi_hba_tran_t	*tran;
9005 
9006 	tran = ddi_get_driver_private(dip);
9007 	if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9008 		return (-1);
9009 
9010 	return (mptsas_unquiesce_bus(mpt));
9011 }
9012 
/*
 * Quiesce the bus: hold the throttle on every target so no new commands
 * are started, then wait for outstanding commands to drain.  A periodic
 * timeout (mptsas_ncmds_checkdrain) re-asserts the hold and signals
 * m_cv once m_ncmds reaches zero.  Returns 0 when quiesced, -1 if the
 * wait was interrupted by a signal (in which case throttles are
 * restored and the HBA restarted).
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Re-open every target's throttle and restart I/O. */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel the pending drain-check timeout.  The
			 * mutex must be dropped before untimeout() so a
			 * concurrently-running callback (which takes
			 * m_mutex) cannot deadlock against us.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
9073 
9074 static int
9075 mptsas_unquiesce_bus(mptsas_t *mpt)
9076 {
9077 	mptsas_target_t	*ptgt = NULL;
9078 
9079 	NDBG28(("mptsas_unquiesce_bus"));
9080 	mutex_enter(&mpt->m_mutex);
9081 	mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9082 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9083 	    MPTSAS_HASH_FIRST);
9084 	while (ptgt != NULL) {
9085 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9086 
9087 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9088 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9089 	}
9090 	mptsas_restart_hba(mpt);
9091 	mutex_exit(&mpt->m_mutex);
9092 	return (0);
9093 }
9094 
9095 static void
9096 mptsas_ncmds_checkdrain(void *arg)
9097 {
9098 	mptsas_t	*mpt = arg;
9099 	mptsas_target_t	*ptgt = NULL;
9100 
9101 	mutex_enter(&mpt->m_mutex);
9102 	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
9103 		mpt->m_quiesce_timeid = 0;
9104 		if (mpt->m_ncmds == 0) {
9105 			/* Command queue has been drained */
9106 			cv_signal(&mpt->m_cv);
9107 		} else {
9108 			/*
9109 			 * The throttle may have been reset because
9110 			 * of a SCSI bus reset
9111 			 */
9112 			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9113 			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
9114 			while (ptgt != NULL) {
9115 				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9116 
9117 				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9118 				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9119 			}
9120 
9121 			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9122 			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
9123 			    drv_usectohz(1000000)));
9124 		}
9125 	}
9126 	mutex_exit(&mpt->m_mutex);
9127 }
9128 
9129 static void
9130 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9131 {
9132 	int	i;
9133 	uint8_t	*cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9134 	char	buf[128];
9135 
9136 	buf[0] = '\0';
9137 	mptsas_log(mpt, CE_NOTE, "?Cmd (0x%p) dump for Target %d Lun %d:\n",
9138 	    (void *)cmd, Tgt(cmd), Lun(cmd));
9139 	(void) sprintf(&buf[0], "\tcdb=[");
9140 	for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9141 		(void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9142 	}
9143 	(void) sprintf(&buf[strlen(buf)], " ]");
9144 	mptsas_log(mpt, CE_NOTE, "?%s\n", buf);
9145 	mptsas_log(mpt, CE_NOTE,
9146 	    "?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
9147 	    cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
9148 	    cmd->cmd_pkt->pkt_state);
9149 	mptsas_log(mpt, CE_NOTE, "?pkt_scbp=0x%x cmd_flags=0x%x\n",
9150 	    *(cmd->cmd_pkt->pkt_scbp), cmd->cmd_flags);
9151 }
9152 
9153 static void
9154 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
9155 {
9156 	caddr_t			memp;
9157 	pMPI2RequestHeader_t	request_hdrp;
9158 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
9159 	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
9160 	uint32_t		request_size, data_size, dataout_size;
9161 	uint32_t		direction;
9162 	ddi_dma_cookie_t	data_cookie;
9163 	ddi_dma_cookie_t	dataout_cookie;
9164 	uint32_t		request_desc_low, request_desc_high = 0;
9165 	uint32_t		i, sense_bufp;
9166 	uint8_t			desc_type;
9167 	uint8_t			*request, function;
9168 	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
9169 	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
9170 
9171 	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
9172 
9173 	request = pt->request;
9174 	direction = pt->direction;
9175 	request_size = pt->request_size;
9176 	data_size = pt->data_size;
9177 	dataout_size = pt->dataout_size;
9178 	data_cookie = pt->data_cookie;
9179 	dataout_cookie = pt->dataout_cookie;
9180 
9181 	/*
9182 	 * Store the passthrough message in memory location
9183 	 * corresponding to our slot number
9184 	 */
9185 	memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
9186 	request_hdrp = (pMPI2RequestHeader_t)memp;
9187 	bzero(memp, mpt->m_req_frame_size);
9188 
9189 	for (i = 0; i < request_size; i++) {
9190 		bcopy(request + i, memp + i, 1);
9191 	}
9192 
9193 	if (data_size || dataout_size) {
9194 		pMpi2SGESimple64_t	sgep;
9195 		uint32_t		sge_flags;
9196 
9197 		sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
9198 		    request_size);
9199 		if (dataout_size) {
9200 
9201 			sge_flags = dataout_size |
9202 			    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9203 			    MPI2_SGE_FLAGS_END_OF_BUFFER |
9204 			    MPI2_SGE_FLAGS_HOST_TO_IOC |
9205 			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9206 			    MPI2_SGE_FLAGS_SHIFT);
9207 			ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
9208 			ddi_put32(acc_hdl, &sgep->Address.Low,
9209 			    (uint32_t)(dataout_cookie.dmac_laddress &
9210 			    0xffffffffull));
9211 			ddi_put32(acc_hdl, &sgep->Address.High,
9212 			    (uint32_t)(dataout_cookie.dmac_laddress
9213 			    >> 32));
9214 			sgep++;
9215 		}
9216 		sge_flags = data_size;
9217 		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9218 		    MPI2_SGE_FLAGS_LAST_ELEMENT |
9219 		    MPI2_SGE_FLAGS_END_OF_BUFFER |
9220 		    MPI2_SGE_FLAGS_END_OF_LIST |
9221 		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9222 		    MPI2_SGE_FLAGS_SHIFT);
9223 		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9224 			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
9225 			    MPI2_SGE_FLAGS_SHIFT);
9226 		} else {
9227 			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
9228 			    MPI2_SGE_FLAGS_SHIFT);
9229 		}
9230 		ddi_put32(acc_hdl, &sgep->FlagsLength,
9231 		    sge_flags);
9232 		ddi_put32(acc_hdl, &sgep->Address.Low,
9233 		    (uint32_t)(data_cookie.dmac_laddress &
9234 		    0xffffffffull));
9235 		ddi_put32(acc_hdl, &sgep->Address.High,
9236 		    (uint32_t)(data_cookie.dmac_laddress >> 32));
9237 	}
9238 
9239 	function = request_hdrp->Function;
9240 	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9241 	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9242 		pMpi2SCSIIORequest_t	scsi_io_req;
9243 
9244 		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
9245 		/*
9246 		 * Put SGE for data and data_out buffer at the end of
9247 		 * scsi_io_request message header.(64 bytes in total)
9248 		 * Following above SGEs, the residual space will be
9249 		 * used by sense data.
9250 		 */
9251 		ddi_put8(acc_hdl,
9252 		    &scsi_io_req->SenseBufferLength,
9253 		    (uint8_t)(request_size - 64));
9254 
9255 		sense_bufp = mpt->m_req_frame_dma_addr +
9256 		    (mpt->m_req_frame_size * cmd->cmd_slot);
9257 		sense_bufp += 64;
9258 		ddi_put32(acc_hdl,
9259 		    &scsi_io_req->SenseBufferLowAddress, sense_bufp);
9260 
9261 		/*
9262 		 * Set SGLOffset0 value
9263 		 */
9264 		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
9265 		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
9266 
9267 		/*
9268 		 * Setup descriptor info
9269 		 */
9270 		desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
9271 		request_desc_high = (ddi_get16(acc_hdl,
9272 		    &scsi_io_req->DevHandle) << 16);
9273 	}
9274 
9275 	/*
9276 	 * We must wait till the message has been completed before
9277 	 * beginning the next message so we wait for this one to
9278 	 * finish.
9279 	 */
9280 	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
9281 	request_desc_low = (cmd->cmd_slot << 16) + desc_type;
9282 	cmd->cmd_rfm = NULL;
9283 	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
9284 	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
9285 	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
9286 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9287 	}
9288 }
9289 
9290 
9291 
9292 static int
9293 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
9294     uint8_t *data, uint32_t request_size, uint32_t reply_size,
9295     uint32_t data_size, uint32_t direction, uint8_t *dataout,
9296     uint32_t dataout_size, short timeout, int mode)
9297 {
9298 	mptsas_pt_request_t		pt;
9299 	mptsas_dma_alloc_state_t	data_dma_state;
9300 	mptsas_dma_alloc_state_t	dataout_dma_state;
9301 	caddr_t				memp;
9302 	mptsas_cmd_t			*cmd = NULL;
9303 	struct scsi_pkt			*pkt;
9304 	uint32_t			reply_len = 0, sense_len = 0;
9305 	pMPI2RequestHeader_t		request_hdrp;
9306 	pMPI2RequestHeader_t		request_msg;
9307 	pMPI2DefaultReply_t		reply_msg;
9308 	Mpi2SCSIIOReply_t		rep_msg;
9309 	int				i, status = 0, pt_flags = 0, rv = 0;
9310 	int				rvalue;
9311 	uint32_t			reply_index;
9312 	uint8_t				function;
9313 
9314 	ASSERT(mutex_owned(&mpt->m_mutex));
9315 
9316 	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
9317 	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
9318 	request_msg = kmem_zalloc(request_size, KM_SLEEP);
9319 
9320 	mutex_exit(&mpt->m_mutex);
9321 	/*
9322 	 * copy in the request buffer since it could be used by
9323 	 * another thread when the pt request into waitq
9324 	 */
9325 	if (ddi_copyin(request, request_msg, request_size, mode)) {
9326 		mutex_enter(&mpt->m_mutex);
9327 		status = EFAULT;
9328 		mptsas_log(mpt, CE_WARN, "failed to copy request data");
9329 		goto out;
9330 	}
9331 	mutex_enter(&mpt->m_mutex);
9332 
9333 	function = request_msg->Function;
9334 	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
9335 		pMpi2SCSITaskManagementRequest_t	task;
9336 		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
9337 		mptsas_setup_bus_reset_delay(mpt);
9338 		rv = mptsas_ioc_task_management(mpt, task->TaskType,
9339 		    task->DevHandle, (int)task->LUN[1]);
9340 
9341 		if (rv != TRUE) {
9342 			status = EIO;
9343 			mptsas_log(mpt, CE_WARN, "task management failed");
9344 		}
9345 		goto out;
9346 	}
9347 
9348 	if (data_size != 0) {
9349 		data_dma_state.size = data_size;
9350 		if (mptsas_passthru_dma_alloc(mpt, &data_dma_state) !=
9351 		    DDI_SUCCESS) {
9352 			status = ENOMEM;
9353 			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
9354 			    "resource");
9355 			goto out;
9356 		}
9357 		pt_flags |= MPTSAS_DATA_ALLOCATED;
9358 		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9359 			mutex_exit(&mpt->m_mutex);
9360 			for (i = 0; i < data_size; i++) {
9361 				if (ddi_copyin(data + i, (uint8_t *)
9362 				    data_dma_state.memp + i, 1, mode)) {
9363 					mutex_enter(&mpt->m_mutex);
9364 					status = EFAULT;
9365 					mptsas_log(mpt, CE_WARN, "failed to "
9366 					    "copy read data");
9367 					goto out;
9368 				}
9369 			}
9370 			mutex_enter(&mpt->m_mutex);
9371 		}
9372 	}
9373 
9374 	if (dataout_size != 0) {
9375 		dataout_dma_state.size = dataout_size;
9376 		if (mptsas_passthru_dma_alloc(mpt, &dataout_dma_state) !=
9377 		    DDI_SUCCESS) {
9378 			status = ENOMEM;
9379 			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
9380 			    "resource");
9381 			goto out;
9382 		}
9383 		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
9384 		mutex_exit(&mpt->m_mutex);
9385 		for (i = 0; i < dataout_size; i++) {
9386 			if (ddi_copyin(dataout + i, (uint8_t *)
9387 			    dataout_dma_state.memp + i, 1, mode)) {
9388 				mutex_enter(&mpt->m_mutex);
9389 				mptsas_log(mpt, CE_WARN, "failed to copy out"
9390 				    " data");
9391 				status = EFAULT;
9392 				goto out;
9393 			}
9394 		}
9395 		mutex_enter(&mpt->m_mutex);
9396 	}
9397 
9398 	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
9399 		status = EAGAIN;
9400 		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
9401 		goto out;
9402 	}
9403 	pt_flags |= MPTSAS_REQUEST_POOL_CMD;
9404 
9405 	bzero((caddr_t)cmd, sizeof (*cmd));
9406 	bzero((caddr_t)pkt, scsi_pkt_size());
9407 	bzero((caddr_t)&pt, sizeof (pt));
9408 
9409 	cmd->ioc_cmd_slot = (uint32_t)(rvalue);
9410 
9411 	pt.request = (uint8_t *)request_msg;
9412 	pt.direction = direction;
9413 	pt.request_size = request_size;
9414 	pt.data_size = data_size;
9415 	pt.dataout_size = dataout_size;
9416 	pt.data_cookie = data_dma_state.cookie;
9417 	pt.dataout_cookie = dataout_dma_state.cookie;
9418 
9419 	/*
9420 	 * Form a blank cmd/pkt to store the acknowledgement message
9421 	 */
9422 	pkt->pkt_cdbp		= (opaque_t)&cmd->cmd_cdb[0];
9423 	pkt->pkt_scbp		= (opaque_t)&cmd->cmd_scb;
9424 	pkt->pkt_ha_private	= (opaque_t)&pt;
9425 	pkt->pkt_flags		= FLAG_HEAD;
9426 	pkt->pkt_time		= timeout;
9427 	cmd->cmd_pkt		= pkt;
9428 	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_PASSTHRU;
9429 
9430 	/*
9431 	 * Save the command in a slot
9432 	 */
9433 	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
9434 		/*
9435 		 * Once passthru command get slot, set cmd_flags
9436 		 * CFLAG_PREPARED.
9437 		 */
9438 		cmd->cmd_flags |= CFLAG_PREPARED;
9439 		mptsas_start_passthru(mpt, cmd);
9440 	} else {
9441 		mptsas_waitq_add(mpt, cmd);
9442 	}
9443 
9444 	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
9445 		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
9446 	}
9447 
9448 	if (cmd->cmd_flags & CFLAG_PREPARED) {
9449 		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
9450 		    cmd->cmd_slot);
9451 		request_hdrp = (pMPI2RequestHeader_t)memp;
9452 	}
9453 
9454 	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
9455 		status = ETIMEDOUT;
9456 		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
9457 		pt_flags |= MPTSAS_CMD_TIMEOUT;
9458 		goto out;
9459 	}
9460 
9461 	if (cmd->cmd_rfm) {
9462 		/*
9463 		 * cmd_rfm is zero means the command reply is a CONTEXT
9464 		 * reply and no PCI Write to post the free reply SMFA
9465 		 * because no reply message frame is used.
9466 		 * cmd_rfm is non-zero means the reply is a ADDRESS
9467 		 * reply and reply message frame is used.
9468 		 */
9469 		pt_flags |= MPTSAS_ADDRESS_REPLY;
9470 		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
9471 		    DDI_DMA_SYNC_FORCPU);
9472 		reply_msg = (pMPI2DefaultReply_t)
9473 		    (mpt->m_reply_frame + (cmd->cmd_rfm -
9474 		    mpt->m_reply_frame_dma_addr));
9475 	}
9476 
9477 	mptsas_fma_check(mpt, cmd);
9478 	if (pkt->pkt_reason == CMD_TRAN_ERR) {
9479 		status = EAGAIN;
9480 		mptsas_log(mpt, CE_WARN, "passthru fma error");
9481 		goto out;
9482 	}
9483 	if (pkt->pkt_reason == CMD_RESET) {
9484 		status = EAGAIN;
9485 		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
9486 		goto out;
9487 	}
9488 
9489 	if (pkt->pkt_reason == CMD_INCOMPLETE) {
9490 		status = EIO;
9491 		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
9492 		goto out;
9493 	}
9494 
9495 	mutex_exit(&mpt->m_mutex);
9496 	if (cmd->cmd_flags & CFLAG_PREPARED) {
9497 		function = request_hdrp->Function;
9498 		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9499 		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9500 			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
9501 			sense_len = reply_size - reply_len;
9502 		} else {
9503 			reply_len = reply_size;
9504 			sense_len = 0;
9505 		}
9506 
9507 		for (i = 0; i < reply_len; i++) {
9508 			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
9509 			    mode)) {
9510 				mutex_enter(&mpt->m_mutex);
9511 				status = EFAULT;
9512 				mptsas_log(mpt, CE_WARN, "failed to copy out "
9513 				    "reply data");
9514 				goto out;
9515 			}
9516 		}
9517 		for (i = 0; i < sense_len; i++) {
9518 			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
9519 			    reply + reply_len + i, 1, mode)) {
9520 				mutex_enter(&mpt->m_mutex);
9521 				status = EFAULT;
9522 				mptsas_log(mpt, CE_WARN, "failed to copy out "
9523 				    "sense data");
9524 				goto out;
9525 			}
9526 		}
9527 	}
9528 
9529 	if (data_size) {
9530 		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9531 			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
9532 			    DDI_DMA_SYNC_FORCPU);
9533 			for (i = 0; i < data_size; i++) {
9534 				if (ddi_copyout((uint8_t *)(
9535 				    data_dma_state.memp + i), data + i,  1,
9536 				    mode)) {
9537 					mutex_enter(&mpt->m_mutex);
9538 					status = EFAULT;
9539 					mptsas_log(mpt, CE_WARN, "failed to "
9540 					    "copy out the reply data");
9541 					goto out;
9542 				}
9543 			}
9544 		}
9545 	}
9546 	mutex_enter(&mpt->m_mutex);
9547 out:
9548 	/*
9549 	 * Put the reply frame back on the free queue, increment the free
9550 	 * index, and write the new index to the free index register.  But only
9551 	 * if this reply is an ADDRESS reply.
9552 	 */
9553 	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
9554 		reply_index = mpt->m_free_index;
9555 		ddi_put32(mpt->m_acc_free_queue_hdl,
9556 		    &((uint32_t *)(void *)mpt->m_free_queue)[reply_index],
9557 		    cmd->cmd_rfm);
9558 		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
9559 		    DDI_DMA_SYNC_FORDEV);
9560 		if (++reply_index == mpt->m_free_queue_depth) {
9561 			reply_index = 0;
9562 		}
9563 		mpt->m_free_index = reply_index;
9564 		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
9565 		    reply_index);
9566 	}
9567 	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
9568 		mptsas_remove_cmd(mpt, cmd);
9569 		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
9570 	}
9571 	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
9572 		mptsas_return_to_pool(mpt, cmd);
9573 	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
9574 		if (mptsas_check_dma_handle(data_dma_state.handle) !=
9575 		    DDI_SUCCESS) {
9576 			ddi_fm_service_impact(mpt->m_dip,
9577 			    DDI_SERVICE_UNAFFECTED);
9578 			status = EFAULT;
9579 		}
9580 		mptsas_passthru_dma_free(&data_dma_state);
9581 	}
9582 	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
9583 		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
9584 		    DDI_SUCCESS) {
9585 			ddi_fm_service_impact(mpt->m_dip,
9586 			    DDI_SERVICE_UNAFFECTED);
9587 			status = EFAULT;
9588 		}
9589 		mptsas_passthru_dma_free(&dataout_dma_state);
9590 	}
9591 	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
9592 		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9593 			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
9594 		}
9595 	}
9596 	if (request_msg)
9597 		kmem_free(request_msg, request_size);
9598 
9599 	return (status);
9600 }
9601 
9602 static int
9603 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
9604 {
9605 	/*
9606 	 * If timeout is 0, set timeout to default of 60 seconds.
9607 	 */
9608 	if (data->Timeout == 0) {
9609 		data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
9610 	}
9611 
9612 	if (((data->DataSize == 0) &&
9613 	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
9614 	    ((data->DataSize != 0) &&
9615 	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
9616 	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
9617 	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
9618 	    (data->DataOutSize != 0))))) {
9619 		if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
9620 			data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
9621 		} else {
9622 			data->DataOutSize = 0;
9623 		}
9624 		/*
9625 		 * Send passthru request messages
9626 		 */
9627 		return (mptsas_do_passthru(mpt,
9628 		    (uint8_t *)((uintptr_t)data->PtrRequest),
9629 		    (uint8_t *)((uintptr_t)data->PtrReply),
9630 		    (uint8_t *)((uintptr_t)data->PtrData),
9631 		    data->RequestSize, data->ReplySize,
9632 		    data->DataSize, data->DataDirection,
9633 		    (uint8_t *)((uintptr_t)data->PtrDataOut),
9634 		    data->DataOutSize, data->Timeout, mode));
9635 	} else {
9636 		return (EINVAL);
9637 	}
9638 }
9639 
9640 /*
9641  * This routine handles the "event query" ioctl.
9642  */
9643 static int
9644 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
9645     int *rval)
9646 {
9647 	int			status;
9648 	mptsas_event_query_t	driverdata;
9649 	uint8_t			i;
9650 
9651 	driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
9652 
9653 	mutex_enter(&mpt->m_mutex);
9654 	for (i = 0; i < 4; i++) {
9655 		driverdata.Types[i] = mpt->m_event_mask[i];
9656 	}
9657 	mutex_exit(&mpt->m_mutex);
9658 
9659 	if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
9660 		status = EFAULT;
9661 	} else {
9662 		*rval = MPTIOCTL_STATUS_GOOD;
9663 		status = 0;
9664 	}
9665 
9666 	return (status);
9667 }
9668 
9669 /*
9670  * This routine handles the "event enable" ioctl.
9671  */
9672 static int
9673 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
9674     int *rval)
9675 {
9676 	int			status;
9677 	mptsas_event_enable_t	driverdata;
9678 	uint8_t			i;
9679 
9680 	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
9681 		mutex_enter(&mpt->m_mutex);
9682 		for (i = 0; i < 4; i++) {
9683 			mpt->m_event_mask[i] = driverdata.Types[i];
9684 		}
9685 		mutex_exit(&mpt->m_mutex);
9686 
9687 		*rval = MPTIOCTL_STATUS_GOOD;
9688 		status = 0;
9689 	} else {
9690 		status = EFAULT;
9691 	}
9692 	return (status);
9693 }
9694 
9695 /*
9696  * This routine handles the "event report" ioctl.
9697  */
9698 static int
9699 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
9700     int *rval)
9701 {
9702 	int			status;
9703 	mptsas_event_report_t	driverdata;
9704 
9705 	mutex_enter(&mpt->m_mutex);
9706 
9707 	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
9708 	    mode) == 0) {
9709 		if (driverdata.Size >= sizeof (mpt->m_events)) {
9710 			if (ddi_copyout(mpt->m_events, data->Events,
9711 			    sizeof (mpt->m_events), mode) != 0) {
9712 				status = EFAULT;
9713 			} else {
9714 				if (driverdata.Size > sizeof (mpt->m_events)) {
9715 					driverdata.Size =
9716 					    sizeof (mpt->m_events);
9717 					if (ddi_copyout(&driverdata.Size,
9718 					    &data->Size,
9719 					    sizeof (driverdata.Size),
9720 					    mode) != 0) {
9721 						status = EFAULT;
9722 					} else {
9723 						*rval = MPTIOCTL_STATUS_GOOD;
9724 						status = 0;
9725 					}
9726 				} else {
9727 					*rval = MPTIOCTL_STATUS_GOOD;
9728 					status = 0;
9729 				}
9730 			}
9731 		} else {
9732 			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
9733 			status = 0;
9734 		}
9735 	} else {
9736 		status = EFAULT;
9737 	}
9738 
9739 	mutex_exit(&mpt->m_mutex);
9740 	return (status);
9741 }
9742 
9743 static void
9744 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
9745 {
9746 	int	*reg_data;
9747 	uint_t	reglen;
9748 	char	*fw_rev;
9749 
9750 	/*
9751 	 * Lookup the 'reg' property and extract the other data
9752 	 */
9753 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
9754 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
9755 	    DDI_PROP_SUCCESS) {
9756 		/*
9757 		 * Extract the PCI data from the 'reg' property first DWORD.
9758 		 * The entry looks like the following:
9759 		 * First DWORD:
9760 		 * Bits 0 - 7 8-bit Register number
9761 		 * Bits 8 - 10 3-bit Function number
9762 		 * Bits 11 - 15 5-bit Device number
9763 		 * Bits 16 - 23 8-bit Bus number
9764 		 * Bits 24 - 25 2-bit Address Space type identifier
9765 		 *
9766 		 * Store the device number in PCIDeviceHwId.
9767 		 * Store the function number in MpiPortNumber.
9768 		 * PciInformation stores bus, device, and function together
9769 		 */
9770 		adapter_data->PCIDeviceHwId = (reg_data[0] & 0x0000F800) >> 11;
9771 		adapter_data->MpiPortNumber = (reg_data[0] & 0x00000700) >> 8;
9772 		adapter_data->PciInformation = (reg_data[0] & 0x00FFFF00) >> 8;
9773 		ddi_prop_free((void *)reg_data);
9774 	} else {
9775 		/*
9776 		 * If we can't determine the PCI data then we fill in FF's for
9777 		 * the data to indicate this.
9778 		 */
9779 		adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
9780 		adapter_data->MpiPortNumber = 0xFFFFFFFF;
9781 		adapter_data->PciInformation = 0xFFFFFFFF;
9782 	}
9783 
9784 	/*
9785 	 * Lookup the 'firmware-version' property and extract the data
9786 	 */
9787 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, mpt->m_dip,
9788 	    DDI_PROP_DONTPASS, "firmware-version", &fw_rev) ==
9789 	    DDI_PROP_SUCCESS) {
9790 		/*
9791 		 * Version is a string of 4 bytes which fits into the DWORD
9792 		 */
9793 		(void) strcpy((char *)&adapter_data->MpiFirmwareVersion,
9794 		    fw_rev);
9795 		ddi_prop_free(fw_rev);
9796 	} else {
9797 		/*
9798 		 * If we can't determine the PCI data then we fill in FF's for
9799 		 * the data to indicate this.
9800 		 */
9801 		adapter_data->MpiFirmwareVersion = 0xFFFFFFFF;
9802 	}
9803 }
9804 
9805 static void
9806 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
9807 {
9808 	char	*driver_verstr = MPTSAS_MOD_STRING;
9809 
9810 	mptsas_lookup_pci_data(mpt, adapter_data);
9811 	adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
9812 	adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
9813 	adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
9814 	adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
9815 	(void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
9816 }
9817 
9818 static void
9819 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
9820 {
9821 	int	*reg_data, i;
9822 	uint_t	reglen;
9823 
9824 	/*
9825 	 * Lookup the 'reg' property and extract the other data
9826 	 */
9827 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
9828 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
9829 	    DDI_PROP_SUCCESS) {
9830 		/*
9831 		 * Extract the PCI data from the 'reg' property first DWORD.
9832 		 * The entry looks like the following:
9833 		 * First DWORD:
9834 		 * Bits 8 - 10 3-bit Function number
9835 		 * Bits 11 - 15 5-bit Device number
9836 		 * Bits 16 - 23 8-bit Bus number
9837 		 */
9838 		pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
9839 		pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
9840 		pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
9841 		ddi_prop_free((void *)reg_data);
9842 	} else {
9843 		/*
9844 		 * If we can't determine the PCI info then we fill in FF's for
9845 		 * the data to indicate this.
9846 		 */
9847 		pci_info->BusNumber = 0xFFFFFFFF;
9848 		pci_info->DeviceNumber = 0xFF;
9849 		pci_info->FunctionNumber = 0xFF;
9850 	}
9851 
9852 	/*
9853 	 * Now get the interrupt vector and the pci header.  The vector can
9854 	 * only be 0 right now.  The header is the first 256 bytes of config
9855 	 * space.
9856 	 */
9857 	pci_info->InterruptVector = 0;
9858 	for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
9859 		pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
9860 		    i);
9861 	}
9862 }
9863 
9864 static int
9865 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
9866     int *rval)
9867 {
9868 	int			status = 0;
9869 	mptsas_t		*mpt;
9870 	mptsas_update_flash_t	flashdata;
9871 	mptsas_pass_thru_t	passthru_data;
9872 	mptsas_adapter_data_t   adapter_data;
9873 	mptsas_pci_info_t	pci_info;
9874 	int			copylen;
9875 
9876 	*rval = MPTIOCTL_STATUS_GOOD;
9877 	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
9878 	if (mpt == NULL) {
9879 		return (scsi_hba_ioctl(dev, cmd, data, mode, credp, rval));
9880 	}
9881 	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
9882 		return (EPERM);
9883 	}
9884 
9885 	/* Make sure power level is D0 before accessing registers */
9886 	mutex_enter(&mpt->m_mutex);
9887 	if (mpt->m_options & MPTSAS_OPT_PM) {
9888 		(void) pm_busy_component(mpt->m_dip, 0);
9889 		if (mpt->m_power_level != PM_LEVEL_D0) {
9890 			mutex_exit(&mpt->m_mutex);
9891 			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
9892 			    DDI_SUCCESS) {
9893 				mptsas_log(mpt, CE_WARN,
9894 				    "mptsas%d: mptsas_ioctl: Raise power "
9895 				    "request failed.", mpt->m_instance);
9896 				(void) pm_idle_component(mpt->m_dip, 0);
9897 				return (ENXIO);
9898 			}
9899 		} else {
9900 			mutex_exit(&mpt->m_mutex);
9901 		}
9902 	} else {
9903 		mutex_exit(&mpt->m_mutex);
9904 	}
9905 
9906 	switch (cmd) {
9907 		case MPTIOCTL_UPDATE_FLASH:
9908 			if (ddi_copyin((void *)data, &flashdata,
9909 				sizeof (struct mptsas_update_flash), mode)) {
9910 				status = EFAULT;
9911 				break;
9912 			}
9913 
9914 			mutex_enter(&mpt->m_mutex);
9915 			if (mptsas_update_flash(mpt,
9916 			    (caddr_t)(long)flashdata.PtrBuffer,
9917 			    flashdata.ImageSize, flashdata.ImageType, mode)) {
9918 				status = EFAULT;
9919 			}
9920 
9921 			/*
9922 			 * Reset the chip to start using the new
9923 			 * firmware.  Reset if failed also.
9924 			 */
9925 			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
9926 				status = EFAULT;
9927 			}
9928 			mutex_exit(&mpt->m_mutex);
9929 			break;
9930 		case MPTIOCTL_PASS_THRU:
9931 			/*
9932 			 * The user has requested to pass through a command to
9933 			 * be executed by the MPT firmware.  Call our routine
9934 			 * which does this.  Only allow one passthru IOCTL at
9935 			 * one time.
9936 			 */
9937 			if (ddi_copyin((void *)data, &passthru_data,
9938 			    sizeof (mptsas_pass_thru_t), mode)) {
9939 				status = EFAULT;
9940 				break;
9941 			}
9942 			mutex_enter(&mpt->m_mutex);
9943 			if (mpt->m_passthru_in_progress) {
9944 				mutex_exit(&mpt->m_mutex);
9945 				return (EBUSY);
9946 			}
9947 			mpt->m_passthru_in_progress = 1;
9948 			status = mptsas_pass_thru(mpt, &passthru_data, mode);
9949 			mpt->m_passthru_in_progress = 0;
9950 			mutex_exit(&mpt->m_mutex);
9951 
9952 			break;
9953 		case MPTIOCTL_GET_ADAPTER_DATA:
9954 			/*
9955 			 * The user has requested to read adapter data.  Call
9956 			 * our routine which does this.
9957 			 */
9958 			bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
9959 			if (ddi_copyin((void *)data, (void *)&adapter_data,
9960 			    sizeof (mptsas_adapter_data_t), mode)) {
9961 				status = EFAULT;
9962 				break;
9963 			}
9964 			if (adapter_data.StructureLength >=
9965 			    sizeof (mptsas_adapter_data_t)) {
9966 				adapter_data.StructureLength = (uint32_t)
9967 				    sizeof (mptsas_adapter_data_t);
9968 				copylen = sizeof (mptsas_adapter_data_t);
9969 				mutex_enter(&mpt->m_mutex);
9970 				mptsas_read_adapter_data(mpt, &adapter_data);
9971 				mutex_exit(&mpt->m_mutex);
9972 			} else {
9973 				adapter_data.StructureLength = (uint32_t)
9974 				    sizeof (mptsas_adapter_data_t);
9975 				copylen = sizeof (adapter_data.StructureLength);
9976 				*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
9977 			}
9978 			if (ddi_copyout((void *)(&adapter_data), (void *)data,
9979 			    copylen, mode) != 0) {
9980 				status = EFAULT;
9981 			}
9982 			break;
9983 		case MPTIOCTL_GET_PCI_INFO:
9984 			/*
9985 			 * The user has requested to read pci info.  Call
9986 			 * our routine which does this.
9987 			 */
9988 			bzero(&pci_info, sizeof (mptsas_pci_info_t));
9989 			mutex_enter(&mpt->m_mutex);
9990 			mptsas_read_pci_info(mpt, &pci_info);
9991 			mutex_exit(&mpt->m_mutex);
9992 			if (ddi_copyout((void *)(&pci_info), (void *)data,
9993 			    sizeof (mptsas_pci_info_t), mode) != 0) {
9994 				status = EFAULT;
9995 			}
9996 			break;
9997 		case MPTIOCTL_RESET_ADAPTER:
9998 			mutex_enter(&mpt->m_mutex);
9999 			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10000 				mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
10001 				    "failed");
10002 				status = EFAULT;
10003 			}
10004 			mutex_exit(&mpt->m_mutex);
10005 			break;
10006 		case MPTIOCTL_EVENT_QUERY:
10007 			/*
10008 			 * The user has done an event query. Call our routine
10009 			 * which does this.
10010 			 */
10011 			status = mptsas_event_query(mpt,
10012 			    (mptsas_event_query_t *)data, mode, rval);
10013 			break;
10014 		case MPTIOCTL_EVENT_ENABLE:
10015 			/*
10016 			 * The user has done an event enable. Call our routine
10017 			 * which does this.
10018 			 */
10019 			status = mptsas_event_enable(mpt,
10020 			    (mptsas_event_enable_t *)data, mode, rval);
10021 			break;
10022 		case MPTIOCTL_EVENT_REPORT:
10023 			/*
10024 			 * The user has done an event report. Call our routine
10025 			 * which does this.
10026 			 */
10027 			status = mptsas_event_report(mpt,
10028 			    (mptsas_event_report_t *)data, mode, rval);
10029 			break;
10030 		default:
10031 			status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
10032 			    rval);
10033 			break;
10034 	}
10035 
10036 	/*
10037 	 * Report idle status to pm after grace period because
10038 	 * multiple ioctls may be queued and raising power
10039 	 * for every ioctl is time consuming.  If a timeout is
10040 	 * pending for the previous ioctl, cancel the timeout and
10041 	 * report idle status to pm because calls to pm_busy_component(9F)
10042 	 * are stacked.
10043 	 */
10044 	mutex_enter(&mpt->m_mutex);
10045 	if (mpt->m_options & MPTSAS_OPT_PM) {
10046 		if (mpt->m_pm_timeid != 0) {
10047 			timeout_id_t tid = mpt->m_pm_timeid;
10048 			mpt->m_pm_timeid = 0;
10049 			mutex_exit(&mpt->m_mutex);
10050 			(void) untimeout(tid);
10051 			/*
10052 			 * Report idle status for previous ioctl since
10053 			 * calls to pm_busy_component(9F) are stacked.
10054 			 */
10055 			(void) pm_idle_component(mpt->m_dip, 0);
10056 			mutex_enter(&mpt->m_mutex);
10057 		}
10058 		mpt->m_pm_timeid = timeout(mptsas_idle_pm, mpt,
10059 		    drv_usectohz((clock_t)mpt->m_pm_idle_delay * 1000000));
10060 	}
10061 	mutex_exit(&mpt->m_mutex);
10062 
10063 	return (status);
10064 }
10065 
10066 int
10067 mptsas_restart_ioc(mptsas_t *mpt) {
10068 	int		rval = DDI_SUCCESS;
10069 	mptsas_target_t	*ptgt = NULL;
10070 
10071 	ASSERT(mutex_owned(&mpt->m_mutex));
10072 
10073 	/*
10074 	 * Set all throttles to HOLD
10075 	 */
10076 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
10077 	    MPTSAS_HASH_FIRST);
10078 	while (ptgt != NULL) {
10079 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10080 
10081 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10082 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10083 	}
10084 
10085 	/*
10086 	 * Disable interrupts
10087 	 */
10088 	MPTSAS_DISABLE_INTR(mpt);
10089 
10090 	/*
10091 	 * Abort all commands: outstanding commands, commands in waitq and
10092 	 * tx_waitq.
10093 	 */
10094 	mptsas_flush_hba(mpt);
10095 
10096 	/*
10097 	 * Reinitialize the chip.
10098 	 */
10099 	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
10100 		rval = DDI_FAILURE;
10101 	}
10102 
10103 	/*
10104 	 * Enable interrupts again
10105 	 */
10106 	MPTSAS_ENABLE_INTR(mpt);
10107 
10108 	/*
10109 	 * If mptsas_init_chip was successful, update the driver data.
10110 	 */
10111 	if (rval == DDI_SUCCESS) {
10112 		mptsas_update_driver_data(mpt);
10113 	}
10114 
10115 	/*
10116 	 * Reset the throttles
10117 	 */
10118 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
10119 	    MPTSAS_HASH_FIRST);
10120 	while (ptgt != NULL) {
10121 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10122 
10123 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10124 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10125 	}
10126 
10127 	mptsas_doneq_empty(mpt);
10128 
10129 	if (rval != DDI_SUCCESS) {
10130 		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
10131 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
10132 	}
10133 	return (rval);
10134 }
10135 
/*
 * Bring the IOC from reset to operational state.
 *
 * Called both from attach (first_time == TRUE) and from
 * mptsas_restart_ioc() (first_time == FALSE).  On the first call the
 * per-IOC DMA resources (request/reply frames, free and post queues) are
 * also allocated; on restart the PCI config space is re-programmed and
 * event notification re-armed instead.  Returns DDI_SUCCESS or
 * DDI_FAILURE.  Statement order here mirrors the MPI2 initialization
 * sequence and must not be rearranged.
 */
int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t	cookie;
	uint32_t		i;

	if (first_time == FALSE) {
		/*
		 * Setup configuration space
		 */
		if (mptsas_config_space_init(mpt) == FALSE) {
			mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
			    "failed!");
			goto fail;
		}
	}

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	if (mptsas_ioc_reset(mpt) == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		return (DDI_FAILURE);
	}
	/*
	 * Do some initialization only needed during attach
	 */
	if (first_time) {
		/*
		 * Get ioc facts from adapter
		 */
		if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts "
			    "failed");
			goto fail;
		}

		/*
		 * Allocate request message frames
		 */
		if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames "
			    "failed");
			goto fail;
		}
		/*
		 * Allocate reply free queue
		 */
		if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue "
			    "failed!");
			goto fail;
		}
		/*
		 * Allocate reply descriptor post queue
		 */
		if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue "
			    "failed!");
			goto fail;
		}
		/*
		 * Allocate reply message frames
		 */
		if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames "
			    "failed!");
			goto fail;
		}
	}

	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	/*
	 * NOTE(review): this array is allocated on every call, including
	 * restarts from mptsas_restart_ioc(); verify the previous
	 * allocation is freed elsewhere on the restart path.
	 */
	mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
	    mpt->m_max_replies, KM_SLEEP);

	/*
	 * Initialize reply post index and request index.  Reply free index is
	 * initialized after the next loop.  m_tags must only be initialized if
	 * this is not the first time because m_active is not allocated if this
	 * is the first time.
	 */
	mpt->m_post_index = 0;
	if (!first_time) {
		mpt->m_active->m_tags = 1;
	}

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
	/* One fewer entries than the queue depth: the queue is never full */
	for (i = 0; i < mpt->m_free_queue_depth - 1; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * First, make sure the HBA is set in "initiator" mode.  Once that
	 * is complete, get the base WWID.
	 */

	if (first_time) {
		if (mptsas_set_initiator_mode(mpt)) {
			mptsas_log(mpt, CE_WARN, "mptsas_set_initiator_mode "
			    "failed!");
			goto fail;
		}

		if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_get_manufacture_page5 failed!");
			goto fail;
		}
	}

	/*
	 * enable events
	 *
	 * NOTE(review): events are re-armed here only on restart;
	 * presumably attach enables them later in its own path — confirm
	 * against the attach code.
	 */
	if (first_time != TRUE) {
		if (mptsas_ioc_enable_event_notification(mpt)) {
			goto fail;
		}
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
10348 
10349 static int
10350 mptsas_init_pm(mptsas_t *mpt)
10351 {
10352 	char		pmc_name[16];
10353 	char		*pmc[] = {
10354 				NULL,
10355 				"0=Off (PCI D3 State)",
10356 				"3=On (PCI D0 State)",
10357 				NULL
10358 			};
10359 	uint16_t	pmcsr_stat;
10360 
10361 	/*
10362 	 * If power management is supported by this chip, create
10363 	 * pm-components property for the power management framework
10364 	 */
10365 	(void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
10366 	pmc[0] = pmc_name;
10367 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
10368 	    "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
10369 		mpt->m_options &= ~MPTSAS_OPT_PM;
10370 		mptsas_log(mpt, CE_WARN,
10371 		    "mptsas%d: pm-component property creation failed.",
10372 		    mpt->m_instance);
10373 		return (DDI_FAILURE);
10374 	}
10375 
10376 	/*
10377 	 * Power on device.
10378 	 */
10379 	(void) pm_busy_component(mpt->m_dip, 0);
10380 	pmcsr_stat = pci_config_get16(mpt->m_config_handle,
10381 	    mpt->m_pmcsr_offset);
10382 	if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
10383 		mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
10384 		    mpt->m_instance);
10385 		pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
10386 		    PCI_PMCSR_D0);
10387 	}
10388 	if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
10389 		mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
10390 		return (DDI_FAILURE);
10391 	}
10392 	mpt->m_power_level = PM_LEVEL_D0;
10393 	/*
10394 	 * Set pm idle delay.
10395 	 */
10396 	mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
10397 	    mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
10398 
10399 	return (DDI_SUCCESS);
10400 }
10401 
10402 /*
10403  * mptsas_add_intrs:
10404  *
10405  * Register FIXED or MSI interrupts.
10406  */
10407 static int
10408 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
10409 {
10410 	dev_info_t	*dip = mpt->m_dip;
10411 	int		avail, actual, count = 0;
10412 	int		i, flag, ret;
10413 
10414 	NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
10415 
10416 	/* Get number of interrupts */
10417 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
10418 	if ((ret != DDI_SUCCESS) || (count <= 0)) {
10419 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
10420 		    "ret %d count %d\n", ret, count);
10421 
10422 		return (DDI_FAILURE);
10423 	}
10424 
10425 	/* Get number of available interrupts */
10426 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
10427 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
10428 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
10429 		    "ret %d avail %d\n", ret, avail);
10430 
10431 		return (DDI_FAILURE);
10432 	}
10433 
10434 	if (avail < count) {
10435 		mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
10436 		    "navail() returned %d", count, avail);
10437 	}
10438 
10439 	/* Mpt only have one interrupt routine */
10440 	if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
10441 		count = 1;
10442 	}
10443 
10444 	/* Allocate an array of interrupt handles */
10445 	mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
10446 	mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
10447 
10448 	flag = DDI_INTR_ALLOC_NORMAL;
10449 
10450 	/* call ddi_intr_alloc() */
10451 	ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
10452 	    count, &actual, flag);
10453 
10454 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
10455 		mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
10456 		    ret);
10457 		kmem_free(mpt->m_htable, mpt->m_intr_size);
10458 		return (DDI_FAILURE);
10459 	}
10460 
10461 	/* use interrupt count returned or abort? */
10462 	if (actual < count) {
10463 		mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
10464 		    count, actual);
10465 	}
10466 
10467 	mpt->m_intr_cnt = actual;
10468 
10469 	/*
10470 	 * Get priority for first msi, assume remaining are all the same
10471 	 */
10472 	if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
10473 	    &mpt->m_intr_pri)) != DDI_SUCCESS) {
10474 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
10475 
10476 		/* Free already allocated intr */
10477 		for (i = 0; i < actual; i++) {
10478 			(void) ddi_intr_free(mpt->m_htable[i]);
10479 		}
10480 
10481 		kmem_free(mpt->m_htable, mpt->m_intr_size);
10482 		return (DDI_FAILURE);
10483 	}
10484 
10485 	/* Test for high level mutex */
10486 	if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
10487 		mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
10488 		    "Hi level interrupt not supported\n");
10489 
10490 		/* Free already allocated intr */
10491 		for (i = 0; i < actual; i++) {
10492 			(void) ddi_intr_free(mpt->m_htable[i]);
10493 		}
10494 
10495 		kmem_free(mpt->m_htable, mpt->m_intr_size);
10496 		return (DDI_FAILURE);
10497 	}
10498 
10499 	/* Call ddi_intr_add_handler() */
10500 	for (i = 0; i < actual; i++) {
10501 		if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
10502 		    (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
10503 			mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
10504 			    "failed %d\n", ret);
10505 
10506 			/* Free already allocated intr */
10507 			for (i = 0; i < actual; i++) {
10508 				(void) ddi_intr_free(mpt->m_htable[i]);
10509 			}
10510 
10511 			kmem_free(mpt->m_htable, mpt->m_intr_size);
10512 			return (DDI_FAILURE);
10513 		}
10514 	}
10515 
10516 	if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
10517 	    != DDI_SUCCESS) {
10518 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
10519 
10520 		/* Free already allocated intr */
10521 		for (i = 0; i < actual; i++) {
10522 			(void) ddi_intr_free(mpt->m_htable[i]);
10523 		}
10524 
10525 		kmem_free(mpt->m_htable, mpt->m_intr_size);
10526 		return (DDI_FAILURE);
10527 	}
10528 
10529 	return (DDI_SUCCESS);
10530 }
10531 
10532 /*
10533  * mptsas_rem_intrs:
10534  *
10535  * Unregister FIXED or MSI interrupts
10536  */
10537 static void
10538 mptsas_rem_intrs(mptsas_t *mpt)
10539 {
10540 	int	i;
10541 
10542 	NDBG6(("mptsas_rem_intrs"));
10543 
10544 	/* Disable all interrupts */
10545 	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
10546 		/* Call ddi_intr_block_disable() */
10547 		(void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
10548 	} else {
10549 		for (i = 0; i < mpt->m_intr_cnt; i++) {
10550 			(void) ddi_intr_disable(mpt->m_htable[i]);
10551 		}
10552 	}
10553 
10554 	/* Call ddi_intr_remove_handler() */
10555 	for (i = 0; i < mpt->m_intr_cnt; i++) {
10556 		(void) ddi_intr_remove_handler(mpt->m_htable[i]);
10557 		(void) ddi_intr_free(mpt->m_htable[i]);
10558 	}
10559 
10560 	kmem_free(mpt->m_htable, mpt->m_intr_size);
10561 }
10562 
10563 /*
10564  * The IO fault service error handling callback function
10565  */
10566 /*ARGSUSED*/
10567 static int
10568 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
10569 {
10570 	/*
10571 	 * as the driver can always deal with an error in any dma or
10572 	 * access handle, we can just return the fme_status value.
10573 	 */
10574 	pci_ereport_post(dip, err, NULL);
10575 	return (err->fme_status);
10576 }
10577 
10578 /*
10579  * mptsas_fm_init - initialize fma capabilities and register with IO
10580  *               fault services.
10581  */
10582 static void
10583 mptsas_fm_init(mptsas_t *mpt)
10584 {
10585 	/*
10586 	 * Need to change iblock to priority for new MSI intr
10587 	 */
10588 	ddi_iblock_cookie_t	fm_ibc;
10589 
10590 	/* Only register with IO Fault Services if we have some capability */
10591 	if (mpt->m_fm_capabilities) {
10592 		/* Adjust access and dma attributes for FMA */
10593 		mpt->m_dev_acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
10594 		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
10595 		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
10596 
10597 		/*
10598 		 * Register capabilities with IO Fault Services.
10599 		 * mpt->m_fm_capabilities will be updated to indicate
10600 		 * capabilities actually supported (not requested.)
10601 		 */
10602 		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
10603 
10604 		/*
10605 		 * Initialize pci ereport capabilities if ereport
10606 		 * capable (should always be.)
10607 		 */
10608 		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
10609 		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
10610 			pci_ereport_setup(mpt->m_dip);
10611 		}
10612 
10613 		/*
10614 		 * Register error callback if error callback capable.
10615 		 */
10616 		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
10617 			ddi_fm_handler_register(mpt->m_dip,
10618 			    mptsas_fm_error_cb, (void *) mpt);
10619 		}
10620 	}
10621 }
10622 
10623 /*
10624  * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
10625  *               fault services.
10626  *
10627  */
10628 static void
10629 mptsas_fm_fini(mptsas_t *mpt)
10630 {
10631 	/* Only unregister FMA capabilities if registered */
10632 	if (mpt->m_fm_capabilities) {
10633 
10634 		/*
10635 		 * Un-register error callback if error callback capable.
10636 		 */
10637 
10638 		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
10639 			ddi_fm_handler_unregister(mpt->m_dip);
10640 		}
10641 
10642 		/*
10643 		 * Release any resources allocated by pci_ereport_setup()
10644 		 */
10645 
10646 		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
10647 		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
10648 			pci_ereport_teardown(mpt->m_dip);
10649 		}
10650 
10651 		/* Unregister from IO Fault Services */
10652 		ddi_fm_fini(mpt->m_dip);
10653 
10654 		/* Adjust access and dma attributes for FMA */
10655 		mpt->m_dev_acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
10656 		mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
10657 		mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
10658 
10659 	}
10660 }
10661 
10662 int
10663 mptsas_check_acc_handle(ddi_acc_handle_t handle)
10664 {
10665 	ddi_fm_error_t	de;
10666 
10667 	if (handle == NULL)
10668 		return (DDI_FAILURE);
10669 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
10670 	return (de.fme_status);
10671 }
10672 
10673 int
10674 mptsas_check_dma_handle(ddi_dma_handle_t handle)
10675 {
10676 	ddi_fm_error_t	de;
10677 
10678 	if (handle == NULL)
10679 		return (DDI_FAILURE);
10680 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
10681 	return (de.fme_status);
10682 }
10683 
10684 void
10685 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
10686 {
10687 	uint64_t	ena;
10688 	char		buf[FM_MAX_CLASS];
10689 
10690 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
10691 	ena = fm_ena_generate(0, FM_ENA_FMT1);
10692 	if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
10693 		ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
10694 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
10695 	}
10696 }
10697 
10698 static int
10699 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
10700     uint16_t *dev_handle, mptsas_target_t **pptgt)
10701 {
10702 	int		rval;
10703 	uint32_t	dev_info;
10704 	uint64_t	sas_wwn;
10705 	uint8_t		physport, phymask;
10706 	uint8_t		phynum, config, disk;
10707 	mptsas_slots_t	*slots = mpt->m_active;
10708 	uint64_t		devicename;
10709 	mptsas_target_t		*tmp_tgt = NULL;
10710 
10711 	ASSERT(*pptgt == NULL);
10712 
10713 	rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
10714 	    &sas_wwn, &dev_info, &physport, &phynum);
10715 	if (rval != DDI_SUCCESS) {
10716 		rval = DEV_INFO_FAIL_PAGE0;
10717 		return (rval);
10718 	}
10719 
10720 	if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
10721 	    MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
10722 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
10723 		rval = DEV_INFO_WRONG_DEVICE_TYPE;
10724 		return (rval);
10725 	}
10726 
10727 	/*
10728 	 * Get SATA Device Name from SAS device page0 for
10729 	 * sata device, if device name doesn't exist, set m_sas_wwn to
10730 	 * 0 for direct attached SATA. For the device behind the expander
10731 	 * we still can use STP address assigned by expander.
10732 	 */
10733 	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
10734 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
10735 		mutex_exit(&mpt->m_mutex);
10736 		/* alloc a tmp_tgt to send the cmd */
10737 		tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
10738 		    KM_SLEEP);
10739 		tmp_tgt->m_devhdl = *dev_handle;
10740 		tmp_tgt->m_deviceinfo = dev_info;
10741 		tmp_tgt->m_qfull_retries = QFULL_RETRIES;
10742 		tmp_tgt->m_qfull_retry_interval =
10743 		    drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
10744 		tmp_tgt->m_t_throttle = MAX_THROTTLE;
10745 		devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
10746 		kmem_free(tmp_tgt, sizeof (struct mptsas_target));
10747 		mutex_enter(&mpt->m_mutex);
10748 		if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
10749 			sas_wwn = devicename;
10750 		} else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
10751 			sas_wwn = 0;
10752 		}
10753 	}
10754 
10755 	/*
10756 	 * Check if the dev handle is for a Phys Disk. If so, set return value
10757 	 * and exit.  Don't add Phys Disks to hash.
10758 	 */
10759 	for (config = 0; config < slots->m_num_raid_configs; config++) {
10760 		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
10761 			if (*dev_handle == slots->m_raidconfig[config].
10762 			    m_physdisk_devhdl[disk]) {
10763 				rval = DEV_INFO_PHYS_DISK;
10764 				return (rval);
10765 			}
10766 		}
10767 	}
10768 
10769 	phymask = mptsas_physport_to_phymask(mpt, physport);
10770 	*pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
10771 	    dev_info, phymask, phynum);
10772 	if (*pptgt == NULL) {
10773 		mptsas_log(mpt, CE_WARN, "Failed to allocated target"
10774 		    "structure!");
10775 		rval = DEV_INFO_FAIL_ALLOC;
10776 		return (rval);
10777 	}
10778 	return (DEV_INFO_SUCCESS);
10779 }
10780 
10781 uint64_t
10782 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
10783 {
10784 	uint64_t	sata_guid = 0, *pwwn = NULL;
10785 	int		target = ptgt->m_devhdl;
10786 	uchar_t		*inq83 = NULL;
10787 	int		inq83_len = 0xFF;
10788 	uchar_t		*dblk = NULL;
10789 	int		inq83_retry = 3;
10790 	int		rval = DDI_FAILURE;
10791 
10792 	inq83	= kmem_zalloc(inq83_len, KM_SLEEP);
10793 
10794 inq83_retry:
10795 	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
10796 	    inq83_len, NULL, 1);
10797 	if (rval != DDI_SUCCESS) {
10798 		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
10799 		    "0x83 for target:%x, lun:%x failed!", target, lun);
10800 		goto out;
10801 	}
10802 	/* According to SAT2, the first descriptor is logic unit name */
10803 	dblk = &inq83[4];
10804 	if ((dblk[1] & 0x30) != 0) {
10805 		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
10806 		goto out;
10807 	}
10808 	pwwn = (uint64_t *)(void *)(&dblk[4]);
10809 	if ((dblk[4] & 0xf0) == 0x50) {
10810 		sata_guid = BE_64(*pwwn);
10811 		goto out;
10812 	} else if (dblk[4] == 'A') {
10813 		NDBG20(("SATA drive has no NAA format GUID."));
10814 		goto out;
10815 	} else {
10816 		/* The data is not ready, wait and retry */
10817 		inq83_retry--;
10818 		if (inq83_retry <= 0) {
10819 			goto out;
10820 		}
10821 		NDBG20(("The GUID is not ready, retry..."));
10822 		delay(1 * drv_usectohz(1000000));
10823 		goto inq83_retry;
10824 	}
10825 out:
10826 	kmem_free(inq83, inq83_len);
10827 	return (sata_guid);
10828 }
10829 static int
10830 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
10831     unsigned char *buf, int len, int *reallen, uchar_t evpd)
10832 {
10833 	uchar_t			cdb[CDB_GROUP0];
10834 	struct scsi_address	ap;
10835 	struct buf		*data_bp = NULL;
10836 	int			resid = 0;
10837 	int			ret = DDI_FAILURE;
10838 
10839 	ASSERT(len <= 0xffff);
10840 
10841 	ap.a_target = MPTSAS_INVALID_DEVHDL;
10842 	ap.a_lun = (uchar_t)(lun);
10843 	ap.a_hba_tran = mpt->m_tran;
10844 
10845 	data_bp = scsi_alloc_consistent_buf(&ap,
10846 	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
10847 	if (data_bp == NULL) {
10848 		return (ret);
10849 	}
10850 	bzero(cdb, CDB_GROUP0);
10851 	cdb[0] = SCMD_INQUIRY;
10852 	cdb[1] = evpd;
10853 	cdb[2] = page;
10854 	cdb[3] = (len & 0xff00) >> 8;
10855 	cdb[4] = (len & 0x00ff);
10856 	cdb[5] = 0;
10857 
10858 	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
10859 	    &resid);
10860 	if (ret == DDI_SUCCESS) {
10861 		if (reallen) {
10862 			*reallen = len - resid;
10863 		}
10864 		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
10865 	}
10866 	if (data_bp) {
10867 		scsi_free_consistent_buf(data_bp);
10868 	}
10869 	return (ret);
10870 }
10871 
10872 static int
10873 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
10874     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
10875     int *resid)
10876 {
10877 	struct scsi_pkt		*pktp = NULL;
10878 	scsi_hba_tran_t		*tran_clone = NULL;
10879 	mptsas_tgt_private_t	*tgt_private = NULL;
10880 	int			ret = DDI_FAILURE;
10881 
10882 	/*
10883 	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
10884 	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
10885 	 * to simulate the cmds from sd
10886 	 */
10887 	tran_clone = kmem_alloc(
10888 	    sizeof (scsi_hba_tran_t), KM_SLEEP);
10889 	if (tran_clone == NULL) {
10890 		goto out;
10891 	}
10892 	bcopy((caddr_t)mpt->m_tran,
10893 	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
10894 	tgt_private = kmem_alloc(
10895 	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
10896 	if (tgt_private == NULL) {
10897 		goto out;
10898 	}
10899 	tgt_private->t_lun = ap->a_lun;
10900 	tgt_private->t_private = ptgt;
10901 	tran_clone->tran_tgt_private = tgt_private;
10902 	ap->a_hba_tran = tran_clone;
10903 
10904 	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
10905 	    data_bp, cdblen, sizeof (struct scsi_arq_status),
10906 	    0, PKT_CONSISTENT, NULL, NULL);
10907 	if (pktp == NULL) {
10908 		goto out;
10909 	}
10910 	bcopy(cdb, pktp->pkt_cdbp, cdblen);
10911 	pktp->pkt_flags = FLAG_NOPARITY;
10912 	if (scsi_poll(pktp) < 0) {
10913 		goto out;
10914 	}
10915 	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
10916 		goto out;
10917 	}
10918 	if (resid != NULL) {
10919 		*resid = pktp->pkt_resid;
10920 	}
10921 
10922 	ret = DDI_SUCCESS;
10923 out:
10924 	if (pktp) {
10925 		scsi_destroy_pkt(pktp);
10926 	}
10927 	if (tran_clone) {
10928 		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
10929 	}
10930 	if (tgt_private) {
10931 		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
10932 	}
10933 	return (ret);
10934 }
10935 static int
10936 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
10937 {
10938 	char	*cp = NULL;
10939 	char	*ptr = NULL;
10940 	size_t	s = 0;
10941 	char	*wwid_str = NULL;
10942 	char	*lun_str = NULL;
10943 	long	lunnum;
10944 	long	phyid = -1;
10945 	int	rc = DDI_FAILURE;
10946 
10947 	ptr = name;
10948 	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
10949 	ptr++;
10950 	if ((cp = strchr(ptr, ',')) == NULL) {
10951 		return (DDI_FAILURE);
10952 	}
10953 
10954 	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
10955 	s = (uintptr_t)cp - (uintptr_t)ptr;
10956 
10957 	bcopy(ptr, wwid_str, s);
10958 	wwid_str[s] = '\0';
10959 
10960 	ptr = ++cp;
10961 
10962 	if ((cp = strchr(ptr, '\0')) == NULL) {
10963 		goto out;
10964 	}
10965 	lun_str =  kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
10966 	s = (uintptr_t)cp - (uintptr_t)ptr;
10967 
10968 	bcopy(ptr, lun_str, s);
10969 	lun_str[s] = '\0';
10970 
10971 	if (name[0] == 'p') {
10972 		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
10973 	} else {
10974 		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
10975 	}
10976 	if (rc != DDI_SUCCESS)
10977 		goto out;
10978 
10979 	if (phyid != -1) {
10980 		ASSERT(phyid < 8);
10981 		*phy = (uint8_t)phyid;
10982 	}
10983 	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
10984 	if (rc != 0)
10985 		goto out;
10986 
10987 	*lun = (int)lunnum;
10988 	rc = DDI_SUCCESS;
10989 out:
10990 	if (wwid_str)
10991 		kmem_free(wwid_str, SCSI_MAXNAMELEN);
10992 	if (lun_str)
10993 		kmem_free(lun_str, SCSI_MAXNAMELEN);
10994 
10995 	return (rc);
10996 }
10997 
10998 /*
10999  * mptsas_parse_smp_name() is to parse sas wwn string
11000  * which format is "wWWN"
11001  */
11002 static int
11003 mptsas_parse_smp_name(char *name, uint64_t *wwn)
11004 {
11005 	char	*ptr = name;
11006 
11007 	if (*ptr != 'w') {
11008 		return (DDI_FAILURE);
11009 	}
11010 
11011 	ptr++;
11012 	if (scsi_wwnstr_to_wwn(ptr, wwn)) {
11013 		return (DDI_FAILURE);
11014 	}
11015 	return (DDI_SUCCESS);
11016 }
11017 
/*
 * Bus-config entry point for the mptsas iport nexus.
 *
 * BUS_CONFIG_ONE parses the child name in arg ("smp@wWWN", "xxx@wWWID,LUN"
 * or "xxx@pPHY,LUN") and configures just that node; BUS_CONFIG_DRIVER and
 * BUS_CONFIG_ALL enumerate every device on the iport.  The scsi_vhci and
 * iport nexus locks are held across the whole operation.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;

	/* Only iport nodes (with a unit address) are configurable here. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}

	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			/* Dispatch on addressing form: WWN vs. PHY. */
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/*
	 * Let the framework attach/online the node(s); use the normalized
	 * name (devnm) when the caller's arg lacked the LUN part.
	 */
	if (ret == NDI_SUCCESS) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
11122 
11123 static int
11124 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
11125     mptsas_target_t *ptgt)
11126 {
11127 	int			rval = DDI_FAILURE;
11128 	struct scsi_inquiry	*sd_inq = NULL;
11129 	mptsas_t		*mpt = DIP2MPT(pdip);
11130 
11131 	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
11132 
11133 	rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
11134 	    SUN_INQSIZE, 0, (uchar_t)0);
11135 
11136 	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
11137 		rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
11138 	} else {
11139 		rval = DDI_FAILURE;
11140 	}
11141 out:
11142 	kmem_free(sd_inq, SUN_INQSIZE);
11143 	return (rval);
11144 }
11145 
/*
 * Configure one LUN of the target with the given SAS address.  An already
 * existing child node is returned directly; phymask 0 denotes the virtual
 * (RAID) iport, which is configured via mptsas_config_raid() instead.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t		*mpt = DIP2MPT(pdip);
	int		phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		/*
		 * NOTE(review): since ptgt was looked up by sasaddr,
		 * this comparison appears to guard against the target
		 * entry changing underneath (enclosure mapping) — it
		 * only fires if m_sas_wwn was updated; confirm intent.
		 */
		if (sasaddr != ptgt->m_sas_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval =  mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
11206 
11207 static int
11208 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
11209     dev_info_t **lundip)
11210 {
11211 	int		rval;
11212 	mptsas_target_t	*ptgt = NULL;
11213 
11214 	ptgt = mptsas_phy_to_tgt(pdip, phy);
11215 	if (ptgt == NULL) {
11216 		/*
11217 		 * didn't match any device by searching
11218 		 */
11219 		return (DDI_FAILURE);
11220 	}
11221 
11222 	/*
11223 	 * If the LUN already exists and the status is online,
11224 	 * we just return the pointer to dev_info_t directly.
11225 	 * For the mdi_pathinfo node, we'll handle it in
11226 	 * mptsas_create_virt_lun().
11227 	 */
11228 
11229 	*lundip = mptsas_find_child_phy(pdip, phy);
11230 	if (*lundip != NULL) {
11231 		return (DDI_SUCCESS);
11232 	}
11233 
11234 	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
11235 
11236 	return (rval);
11237 }
11238 
11239 static int
11240 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
11241     uint8_t *lun_addr_type)
11242 {
11243 	uint32_t	lun_idx = 0;
11244 
11245 	ASSERT(lun_num != NULL);
11246 	ASSERT(lun_addr_type != NULL);
11247 
11248 	lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
11249 	/* determine report luns addressing type */
11250 	switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
11251 		/*
11252 		 * Vendors in the field have been found to be concatenating
11253 		 * bus/target/lun to equal the complete lun value instead
11254 		 * of switching to flat space addressing
11255 		 */
11256 		/* 00b - peripheral device addressing method */
11257 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
11258 		/* FALLTHRU */
11259 		/* 10b - logical unit addressing method */
11260 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
11261 		/* FALLTHRU */
11262 		/* 01b - flat space addressing method */
11263 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
11264 		/* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
11265 		*lun_addr_type = (buf[lun_idx] &
11266 		    MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
11267 		*lun_num = (buf[lun_idx] & 0x3F) << 8;
11268 		*lun_num |= buf[lun_idx + 1];
11269 		return (DDI_SUCCESS);
11270 	default:
11271 		return (DDI_FAILURE);
11272 	}
11273 }
11274 
11275 static int
11276 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
11277 {
11278 	struct buf		*repluns_bp = NULL;
11279 	struct scsi_address	ap;
11280 	uchar_t			cdb[CDB_GROUP5];
11281 	int			ret = DDI_FAILURE;
11282 	int			retry = 0;
11283 	int			lun_list_len = 0;
11284 	uint16_t		lun_num = 0;
11285 	uint8_t			lun_addr_type = 0;
11286 	uint32_t		lun_cnt = 0;
11287 	uint32_t		lun_total = 0;
11288 	dev_info_t		*cdip = NULL;
11289 	uint16_t		*saved_repluns = NULL;
11290 	char			*buffer = NULL;
11291 	int			buf_len = 128;
11292 	mptsas_t		*mpt = DIP2MPT(pdip);
11293 	uint64_t		sas_wwn = 0;
11294 	uint8_t			phy = 0xFF;
11295 	uint32_t		dev_info = 0;
11296 
11297 	mutex_enter(&mpt->m_mutex);
11298 	sas_wwn = ptgt->m_sas_wwn;
11299 	phy = ptgt->m_phynum;
11300 	dev_info = ptgt->m_deviceinfo;
11301 	mutex_exit(&mpt->m_mutex);
11302 
11303 	if (sas_wwn == 0) {
11304 		/*
11305 		 * It's a SATA without Device Name
11306 		 * So don't try multi-LUNs
11307 		 */
11308 		if (mptsas_find_child_phy(pdip, phy)) {
11309 			return (DDI_SUCCESS);
11310 		} else {
11311 			/*
11312 			 * need configure and create node
11313 			 */
11314 			return (DDI_FAILURE);
11315 		}
11316 	}
11317 
11318 	/*
11319 	 * WWN (SAS address or Device Name exist)
11320 	 */
11321 	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
11322 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
11323 		/*
11324 		 * SATA device with Device Name
11325 		 * So don't try multi-LUNs
11326 		 */
11327 		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
11328 			return (DDI_SUCCESS);
11329 		} else {
11330 			return (DDI_FAILURE);
11331 		}
11332 	}
11333 
11334 	do {
11335 		ap.a_target = MPTSAS_INVALID_DEVHDL;
11336 		ap.a_lun = 0;
11337 		ap.a_hba_tran = mpt->m_tran;
11338 		repluns_bp = scsi_alloc_consistent_buf(&ap,
11339 		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
11340 		if (repluns_bp == NULL) {
11341 			retry++;
11342 			continue;
11343 		}
11344 		bzero(cdb, CDB_GROUP5);
11345 		cdb[0] = SCMD_REPORT_LUNS;
11346 		cdb[6] = (buf_len & 0xff000000) >> 24;
11347 		cdb[7] = (buf_len & 0x00ff0000) >> 16;
11348 		cdb[8] = (buf_len & 0x0000ff00) >> 8;
11349 		cdb[9] = (buf_len & 0x000000ff);
11350 
11351 		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
11352 		    repluns_bp, NULL);
11353 		if (ret != DDI_SUCCESS) {
11354 			scsi_free_consistent_buf(repluns_bp);
11355 			retry++;
11356 			continue;
11357 		}
11358 		lun_list_len = BE_32(*(int *)((void *)(
11359 		    repluns_bp->b_un.b_addr)));
11360 		if (buf_len >= lun_list_len + 8) {
11361 			ret = DDI_SUCCESS;
11362 			break;
11363 		}
11364 		scsi_free_consistent_buf(repluns_bp);
11365 		buf_len = lun_list_len + 8;
11366 
11367 	} while (retry < 3);
11368 
11369 	if (ret != DDI_SUCCESS)
11370 		return (ret);
11371 	buffer = (char *)repluns_bp->b_un.b_addr;
11372 	/*
11373 	 * find out the number of luns returned by the SCSI ReportLun call
11374 	 * and allocate buffer space
11375 	 */
11376 	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
11377 	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
11378 	if (saved_repluns == NULL) {
11379 		scsi_free_consistent_buf(repluns_bp);
11380 		return (DDI_FAILURE);
11381 	}
11382 	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
11383 		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
11384 		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
11385 			continue;
11386 		}
11387 		saved_repluns[lun_cnt] = lun_num;
11388 		if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
11389 			ret = DDI_SUCCESS;
11390 		else
11391 			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
11392 			    ptgt);
11393 		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
11394 			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
11395 			    MPTSAS_DEV_GONE);
11396 		}
11397 	}
11398 	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
11399 	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
11400 	scsi_free_consistent_buf(repluns_bp);
11401 	return (DDI_SUCCESS);
11402 }
11403 
11404 static int
11405 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
11406 {
11407 	int			rval = DDI_FAILURE;
11408 	struct scsi_inquiry	*sd_inq = NULL;
11409 	mptsas_t		*mpt = DIP2MPT(pdip);
11410 	mptsas_target_t		*ptgt = NULL;
11411 
11412 	mutex_enter(&mpt->m_mutex);
11413 	ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
11414 	mutex_exit(&mpt->m_mutex);
11415 	if (ptgt == NULL) {
11416 		mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
11417 		    "not found.", target);
11418 		return (rval);
11419 	}
11420 
11421 	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
11422 	rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
11423 	    SUN_INQSIZE, 0, (uchar_t)0);
11424 
11425 	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
11426 		rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
11427 		    0);
11428 	} else {
11429 		rval = DDI_FAILURE;
11430 	}
11431 
11432 out:
11433 	kmem_free(sd_inq, SUN_INQSIZE);
11434 	return (rval);
11435 }
11436 
11437 /*
11438  * configure all RAID volumes for virtual iport
11439  */
11440 static void
11441 mptsas_config_all_viport(dev_info_t *pdip)
11442 {
11443 	mptsas_t	*mpt = DIP2MPT(pdip);
11444 	int		config, vol;
11445 	int		target;
11446 	dev_info_t	*lundip = NULL;
11447 	mptsas_slots_t	*slots = mpt->m_active;
11448 
11449 	/*
11450 	 * Get latest RAID info and search for any Volume DevHandles.  If any
11451 	 * are found, configure the volume.
11452 	 */
11453 	mutex_enter(&mpt->m_mutex);
11454 	for (config = 0; config < slots->m_num_raid_configs; config++) {
11455 		for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
11456 			if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
11457 			    == 1) {
11458 				target = slots->m_raidconfig[config].
11459 				    m_raidvol[vol].m_raidhandle;
11460 				mutex_exit(&mpt->m_mutex);
11461 				(void) mptsas_config_raid(pdip, target,
11462 				    &lundip);
11463 				mutex_enter(&mpt->m_mutex);
11464 			}
11465 		}
11466 	}
11467 	mutex_exit(&mpt->m_mutex);
11468 }
11469 
/*
 * Offline every child node and MPxIO path of the given target whose LUN
 * is absent from the repluns[] list just returned by REPORT LUNS.
 * Children of other targets (different WWN), "smp" nodes, and nodes whose
 * address cannot be parsed are left untouched.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_sas_wwn;
	mutex_exit(&mpt->m_mutex);

	/* Pass 1: plain devinfo children under the iport. */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		/* Advance first: savechild may be offlined below. */
		child = ddi_get_next_sibling(child);

		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		/* Only consider children of this target's WWN. */
		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/* Pass 2: MPxIO client paths, same matching logic. */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		/* Advance first: savepip may be offlined below. */
		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
11566 
/*
 * Rebuild the driver's SMP and target hash tables by walking the
 * controller's configuration pages with the GetNextHandle form, starting
 * from the handles saved in mpt->m_smp_devhdl / mpt->m_dev_handle.  The
 * m_done_traverse_* flags (set elsewhere when the firmware reports the
 * end of the list) terminate each loop.
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t	page_address;
	int		rval = 0;
	uint16_t	dev_handle;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/* Walk SAS expander (SMP) page 0 entries by GetNextHandle. */
	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		/* Remember progress so a restart resumes where we stopped. */
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
11620 
11621 void
11622 mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
11623 {
11624 	mptsas_hash_data_t *data;
11625 	data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
11626 	while (data != NULL) {
11627 		data->devhdl = MPTSAS_INVALID_DEVHDL;
11628 		data->device_info = 0;
11629 		/*
11630 		 * For tgttbl, clear dr_flag.
11631 		 */
11632 		data->dr_flag = MPTSAS_DR_INACTIVE;
11633 		data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
11634 	}
11635 }
11636 
/*
 * Refresh the driver's view of the topology after a hard reset:
 * recompute the port/phymask mapping, invalidate every cached target
 * and SMP entry, and re-walk the firmware configuration pages.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalid all the entries in hash table
	 *    m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3. call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);
	/*
	 * Invalid the existing entries
	 */
	mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
	mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
	/* Restart both traversals from the beginning. */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
11658 
/*
 * Enumerate everything on one iport: for the virtual iport (phymask 0)
 * configure the RAID volumes; otherwise refresh the hash tables if
 * needed, then online every SMP expander and configure every target
 * whose phymask matches this iport.
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t	*smpdip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask = 0;
	uint8_t		phy_mask;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/*
	 * NOTE(review): m_mutex is dropped around the online/config calls
	 * in both loops below; this assumes the hash traversal cursor
	 * remains valid across the drop — confirm.
	 */
	psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
	    MPTSAS_HASH_FIRST);
	while (psmp != NULL) {
		phy_mask = psmp->m_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
		psmp = (mptsas_smp_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
	}

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		phy_mask = ptgt->m_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mutex_exit(&mpt->m_mutex);
}
11718 
11719 static int
11720 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
11721 {
11722 	int		rval = DDI_FAILURE;
11723 	dev_info_t	*tdip;
11724 
11725 	rval = mptsas_config_luns(pdip, ptgt);
11726 	if (rval != DDI_SUCCESS) {
11727 		/*
11728 		 * The return value means the SCMD_REPORT_LUNS
11729 		 * did not execute successfully. The target maybe
11730 		 * doesn't support such command.
11731 		 */
11732 		rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
11733 	}
11734 	return (rval);
11735 }
11736 
11737 /*
11738  * Return fail if not all the childs/paths are freed.
11739  * if there is any path under the HBA, the return value will be always fail
11740  * because we didn't call mdi_pi_free for path
11741  */
11742 static int
11743 mptsas_offline_target(dev_info_t *pdip, char *name)
11744 {
11745 	dev_info_t		*child = NULL, *prechild = NULL;
11746 	mdi_pathinfo_t		*pip = NULL, *savepip = NULL;
11747 	int			tmp_rval, rval = DDI_SUCCESS;
11748 	char			*addr, *cp;
11749 	size_t			s;
11750 	mptsas_t		*mpt = DIP2MPT(pdip);
11751 
11752 	child = ddi_get_child(pdip);
11753 	while (child) {
11754 		addr = ddi_get_name_addr(child);
11755 		prechild = child;
11756 		child = ddi_get_next_sibling(child);
11757 
11758 		if (addr == NULL) {
11759 			continue;
11760 		}
11761 		if ((cp = strchr(addr, ',')) == NULL) {
11762 			continue;
11763 		}
11764 
11765 		s = (uintptr_t)cp - (uintptr_t)addr;
11766 
11767 		if (strncmp(addr, name, s) != 0) {
11768 			continue;
11769 		}
11770 
11771 		tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
11772 		    NDI_DEVI_REMOVE);
11773 		if (tmp_rval != DDI_SUCCESS) {
11774 			rval = DDI_FAILURE;
11775 			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
11776 			    prechild, MPTSAS_DEV_GONE) !=
11777 			    DDI_PROP_SUCCESS) {
11778 				mptsas_log(mpt, CE_WARN, "mptsas driver "
11779 				    "unable to create property for "
11780 				    "SAS %s (MPTSAS_DEV_GONE)", addr);
11781 			}
11782 		}
11783 	}
11784 
11785 	pip = mdi_get_next_client_path(pdip, NULL);
11786 	while (pip) {
11787 		addr = MDI_PI(pip)->pi_addr;
11788 		savepip = pip;
11789 		pip = mdi_get_next_client_path(pdip, pip);
11790 		if (addr == NULL) {
11791 			continue;
11792 		}
11793 
11794 		if ((cp = strchr(addr, ',')) == NULL) {
11795 			continue;
11796 		}
11797 
11798 		s = (uintptr_t)cp - (uintptr_t)addr;
11799 
11800 		if (strncmp(addr, name, s) != 0) {
11801 			continue;
11802 		}
11803 
11804 		(void) mptsas_offline_lun(pdip, NULL, savepip,
11805 		    NDI_DEVI_REMOVE);
11806 		/*
11807 		 * driver will not invoke mdi_pi_free, so path will not
11808 		 * be freed forever, return DDI_FAILURE.
11809 		 */
11810 		rval = DDI_FAILURE;
11811 	}
11812 	return (rval);
11813 }
11814 
11815 static int
11816 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
11817     mdi_pathinfo_t *rpip, uint_t flags)
11818 {
11819 	int		rval = DDI_FAILURE;
11820 	char		*devname;
11821 	dev_info_t	*cdip, *parent;
11822 
11823 	if (rpip != NULL) {
11824 		parent = scsi_vhci_dip;
11825 		cdip = mdi_pi_get_client(rpip);
11826 	} else if (rdip != NULL) {
11827 		parent = pdip;
11828 		cdip = rdip;
11829 	} else {
11830 		return (DDI_FAILURE);
11831 	}
11832 
11833 	/*
11834 	 * Make sure node is attached otherwise
11835 	 * it won't have related cache nodes to
11836 	 * clean up.  i_ddi_devi_attached is
11837 	 * similiar to i_ddi_node_state(cdip) >=
11838 	 * DS_ATTACHED.
11839 	 */
11840 	if (i_ddi_devi_attached(cdip)) {
11841 
11842 		/* Get full devname */
11843 		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
11844 		(void) ddi_deviname(cdip, devname);
11845 		/* Clean cache */
11846 		(void) devfs_clean(parent, devname + 1,
11847 		    DV_CLEAN_FORCE);
11848 		kmem_free(devname, MAXNAMELEN + 1);
11849 	}
11850 	if (rpip != NULL) {
11851 		if (MDI_PI_IS_OFFLINE(rpip)) {
11852 			rval = DDI_SUCCESS;
11853 		} else {
11854 			rval = mdi_pi_offline(rpip, 0);
11855 		}
11856 	} else {
11857 		rval = ndi_devi_offline(cdip, flags);
11858 	}
11859 
11860 	return (rval);
11861 }
11862 
11863 static dev_info_t *
11864 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
11865 {
11866 	dev_info_t	*child = NULL;
11867 	char		*smp_wwn = NULL;
11868 
11869 	child = ddi_get_child(parent);
11870 	while (child) {
11871 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
11872 		    DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
11873 		    != DDI_SUCCESS) {
11874 			child = ddi_get_next_sibling(child);
11875 			continue;
11876 		}
11877 
11878 		if (strcmp(smp_wwn, str_wwn) == 0) {
11879 			ddi_prop_free(smp_wwn);
11880 			break;
11881 		}
11882 		child = ddi_get_next_sibling(child);
11883 		ddi_prop_free(smp_wwn);
11884 	}
11885 	return (child);
11886 }
11887 
11888 static int
11889 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
11890 {
11891 	int		rval = DDI_FAILURE;
11892 	char		*devname;
11893 	char		wwn_str[MPTSAS_WWN_STRLEN];
11894 	dev_info_t	*cdip;
11895 
11896 	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
11897 
11898 	cdip = mptsas_find_smp_child(pdip, wwn_str);
11899 
11900 	if (cdip == NULL)
11901 		return (DDI_SUCCESS);
11902 
11903 	/*
11904 	 * Make sure node is attached otherwise
11905 	 * it won't have related cache nodes to
11906 	 * clean up.  i_ddi_devi_attached is
11907 	 * similiar to i_ddi_node_state(cdip) >=
11908 	 * DS_ATTACHED.
11909 	 */
11910 	if (i_ddi_devi_attached(cdip)) {
11911 
11912 		/* Get full devname */
11913 		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
11914 		(void) ddi_deviname(cdip, devname);
11915 		/* Clean cache */
11916 		(void) devfs_clean(pdip, devname + 1,
11917 		    DV_CLEAN_FORCE);
11918 		kmem_free(devname, MAXNAMELEN + 1);
11919 	}
11920 
11921 	rval = ndi_devi_offline(cdip, flags);
11922 
11923 	return (rval);
11924 }
11925 
11926 static dev_info_t *
11927 mptsas_find_child(dev_info_t *pdip, char *name)
11928 {
11929 	dev_info_t	*child = NULL;
11930 	char		*rname = NULL;
11931 	int		rval = DDI_FAILURE;
11932 
11933 	rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
11934 
11935 	child = ddi_get_child(pdip);
11936 	while (child) {
11937 		rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
11938 		if (rval != DDI_SUCCESS) {
11939 			child = ddi_get_next_sibling(child);
11940 			bzero(rname, SCSI_MAXNAMELEN);
11941 			continue;
11942 		}
11943 
11944 		if (strcmp(rname, name) == 0) {
11945 			break;
11946 		}
11947 		child = ddi_get_next_sibling(child);
11948 		bzero(rname, SCSI_MAXNAMELEN);
11949 	}
11950 
11951 	kmem_free(rname, SCSI_MAXNAMELEN);
11952 
11953 	return (child);
11954 }
11955 
11956 
11957 static dev_info_t *
11958 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
11959 {
11960 	dev_info_t	*child = NULL;
11961 	char		*name = NULL;
11962 	char		*addr = NULL;
11963 
11964 	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
11965 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
11966 	(void) sprintf(name, "%016"PRIx64, sasaddr);
11967 	(void) sprintf(addr, "w%s,%x", name, lun);
11968 	child = mptsas_find_child(pdip, addr);
11969 	kmem_free(name, SCSI_MAXNAMELEN);
11970 	kmem_free(addr, SCSI_MAXNAMELEN);
11971 	return (child);
11972 }
11973 
11974 static dev_info_t *
11975 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
11976 {
11977 	dev_info_t	*child;
11978 	char		*addr;
11979 
11980 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
11981 	(void) sprintf(addr, "p%x,0", phy);
11982 	child = mptsas_find_child(pdip, addr);
11983 	kmem_free(addr, SCSI_MAXNAMELEN);
11984 	return (child);
11985 }
11986 
11987 static mdi_pathinfo_t *
11988 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
11989 {
11990 	mdi_pathinfo_t	*path;
11991 	char		*addr = NULL;
11992 
11993 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
11994 	(void) sprintf(addr, "p%x,0", phy);
11995 	path = mdi_pi_find(pdip, NULL, addr);
11996 	kmem_free(addr, SCSI_MAXNAMELEN);
11997 	return (path);
11998 }
11999 
12000 static mdi_pathinfo_t *
12001 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
12002 {
12003 	mdi_pathinfo_t	*path;
12004 	char		*name = NULL;
12005 	char		*addr = NULL;
12006 
12007 	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12008 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12009 	(void) sprintf(name, "%016"PRIx64, sasaddr);
12010 	(void) sprintf(addr, "w%s,%x", name, lun);
12011 	path = mdi_pi_find(parent, NULL, addr);
12012 	kmem_free(name, SCSI_MAXNAMELEN);
12013 	kmem_free(addr, SCSI_MAXNAMELEN);
12014 
12015 	return (path);
12016 }
12017 
/*
 * Enumerate one LUN of a target.  First try to obtain a devid/GUID from
 * VPD page 0x83 so the LUN can be created under scsi_vhci (MPxIO); fall
 * back to creating a physical child node of the iport when no GUID is
 * available, MPxIO is disabled, or virtual node creation fails.
 *
 * pdip    - iport devinfo node the LUN hangs off
 * sd_inq  - standard inquiry data for the LUN
 * lun_dip - out: devinfo node of the created LUN
 * ptgt    - target structure the LUN belongs to
 * lun     - LUN number
 *
 * Returns DDI_SUCCESS if a node was created and onlined, DDI_FAILURE
 * otherwise.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int			i = 0;
	uchar_t			*inq83 = NULL;
	int			inq83_len1 = 0xFF;	/* page 0x83 buffer size */
	int			inq83_len = 0;		/* actual bytes returned */
	int			rval = DDI_FAILURE;
	ddi_devid_t		devid;
	char			*guid = NULL;
	int			target = ptgt->m_devhdl;
	mdi_pathinfo_t		*pip = NULL;
	mptsas_t		*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83	= kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			/* Wait one second between retries. */
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/*
	 * Reset rval unconditionally (this path is also reached after a
	 * successful GUID extraction); success now depends solely on the
	 * node creation calls below.
	 */
	rval = DDI_FAILURE;

create_lun:
	/* Prefer a virtual (MPxIO) node; fall back to a physical node. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);
	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
12137 
/*
 * Create (or revalidate) a scsi_vhci pathinfo node for one LUN of a
 * target.  If a path for this unit address already exists, it is
 * re-onlined when the GUID still matches, or torn down and recreated
 * when the GUID changed.  Otherwise a new pathinfo node is allocated,
 * its properties are set, and it is onlined.
 *
 * On success *pip and *lun_dip reference the path and client node; on
 * failure both are set to NULL.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
    dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
{
	int			target;
	char			*nodename = NULL;
	char			**compatible = NULL;
	int			ncompatible	= 0;
	int			mdi_rtn = MDI_FAILURE;
	int			rval = DDI_FAILURE;
	char			*old_guid = NULL;
	mptsas_t		*mpt = DIP2MPT(pdip);
	char			*lun_addr = NULL;
	char			*wwn_str = NULL;
	char			*component = NULL;
	uint8_t			phy = 0xFF;
	uint64_t		sas_wwn;
	uint32_t		devinfo;

	/* Snapshot target fields under the instance mutex. */
	mutex_enter(&mpt->m_mutex);
	target = ptgt->m_devhdl;
	sas_wwn = ptgt->m_sas_wwn;
	devinfo = ptgt->m_deviceinfo;
	phy = ptgt->m_phynum;
	mutex_exit(&mpt->m_mutex);

	/* WWN-addressed if we have one, else PHY-addressed (direct SATA). */
	if (sas_wwn) {
		*pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
	} else {
		*pip = mptsas_find_path_phy(pdip, phy);
	}

	if (*pip != NULL) {
		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
		ASSERT(*lun_dip != NULL);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
		    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
		    MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
			if (strncmp(guid, old_guid, strlen(guid)) == 0) {
				/*
				 * Same path back online again.
				 */
				(void) ddi_prop_free(old_guid);
				if (!MDI_PI_IS_ONLINE(*pip) &&
				    !MDI_PI_IS_STANDBY(*pip)) {
					rval = mdi_pi_online(*pip, 0);
				} else {
					rval = DDI_SUCCESS;
				}
				if (rval != DDI_SUCCESS) {
					mptsas_log(mpt, CE_WARN, "path:target: "
					    "%x, lun:%x online failed!", target,
					    lun);
					*pip = NULL;
					*lun_dip = NULL;
				}
				return (rval);
			} else {
				/*
				 * The GUID of the LUN has changed which maybe
				 * because customer mapped another volume to the
				 * same LUN.
				 */
				mptsas_log(mpt, CE_WARN, "The GUID of the "
				    "target:%x, lun:%x was changed, maybe "
				    "because someone mapped another volume "
				    "to the same LUN", target, lun);
				(void) ddi_prop_free(old_guid);
				/* Offline and free the stale path first. */
				if (!MDI_PI_IS_OFFLINE(*pip)) {
					rval = mdi_pi_offline(*pip, 0);
					if (rval != MDI_SUCCESS) {
						mptsas_log(mpt, CE_WARN, "path:"
						    "target:%x, lun:%x offline "
						    "failed!", target, lun);
						*pip = NULL;
						*lun_dip = NULL;
						return (DDI_FAILURE);
					}
				}
				if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
					mptsas_log(mpt, CE_WARN, "path:target:"
					    "%x, lun:%x free failed!", target,
					    lun);
					*pip = NULL;
					*lun_dip = NULL;
					return (DDI_FAILURE);
				}
			}
		} else {
			mptsas_log(mpt, CE_WARN, "Can't get client-guid "
			    "property for path:target:%x, lun:%x", target, lun);
			*pip = NULL;
			*lun_dip = NULL;
			return (DDI_FAILURE);
		}
	}
	/* Derive node name and compatible list from the inquiry data. */
	scsi_hba_nodename_compatible_get(inq, NULL,
	    inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);

	/*
	 * if nodename can't be determined then print a message and skip it
	 */
	if (nodename == NULL) {
		mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
		    "driver for target%d lun %d dtype:0x%02x", target, lun,
		    inq->inq_dtype);
		return (DDI_FAILURE);
	}

	wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
	/* The property is needed by MPAPI */
	(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);

	lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	if (sas_wwn)
		(void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
	else
		(void) sprintf(lun_addr, "p%x,%x", phy, lun);

	mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
	    guid, lun_addr, compatible, ncompatible,
	    0, pip);
	if (mdi_rtn == MDI_SUCCESS) {

		if (mdi_prop_update_string(*pip, MDI_GUID,
		    guid) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create property for target %d lun %d (MDI_GUID)",
			    target, lun);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}

		if (mdi_prop_update_int(*pip, LUN_PROP,
		    lun) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create property for target %d lun %d (LUN_PROP)",
			    target, lun);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}
		if (mdi_prop_update_string_array(*pip, "compatible",
		    compatible, ncompatible) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create property for target %d lun %d (COMPATIBLE)",
			    target, lun);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}
		if (sas_wwn && (mdi_prop_update_string(*pip, "target-port",
		    wwn_str) != DDI_PROP_SUCCESS)) {
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create property for target %d lun %d "
			    "(target-port)", target, lun);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		} else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
		    "sata-phy", phy) != DDI_PROP_SUCCESS)) {
			/*
			 * Direct attached SATA device without DeviceName
			 */
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create property for SAS target %d lun %d "
			    "(sata-phy)", target, lun);
			/*
			 * NOTE(review): this assigns NDI_FAILURE where every
			 * other error path here uses MDI_FAILURE; both are
			 * nonzero so the cleanup below still triggers, but
			 * confirm MDI_FAILURE was intended.
			 */
			mdi_rtn = NDI_FAILURE;
			goto virt_create_done;
		}

		/* inq_dtype == 0 means a direct-access (disk) device. */
		if (inq->inq_dtype == 0) {
			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			/*
			 * set obp path for pathinfo
			 */
			(void) snprintf(component, MAXPATHLEN,
			    "disk@%s", lun_addr);

			if (mdi_pi_pathname_obp_set(*pip, component) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
				    "unable to set obp-path for object %s",
				    component);
				mdi_rtn = MDI_FAILURE;
				goto virt_create_done;
			}
		}

		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
		/* SATA/ATAPI devices get a pm-capable property for sd. */
		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
			if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
			    "pm-capable", 1)) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver"
				    "failed to create pm-capable "
				    "property, target %d", target);
				mdi_rtn = MDI_FAILURE;
				goto virt_create_done;
			}
		}
		NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
		mdi_rtn = mdi_pi_online(*pip, 0);
		if (mdi_rtn == MDI_NOT_SUPPORTED) {
			mdi_rtn = MDI_FAILURE;
		}
virt_create_done:
		/* On any failure, free the partially set up path. */
		if (*pip && mdi_rtn != MDI_SUCCESS) {
			(void) mdi_pi_free(*pip, 0);
			*pip = NULL;
			*lun_dip = NULL;
		}
	}

	scsi_hba_nodename_compatible_free(nodename, compatible);
	if (lun_addr != NULL) {
		kmem_free(lun_addr, SCSI_MAXNAMELEN);
	}
	if (wwn_str != NULL) {
		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
	}
	if (component != NULL) {
		kmem_free(component, MAXPATHLEN);
	}

	return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
12364 
/*
 * Create a physical (non-MPxIO) devinfo child node for one LUN of a
 * target under the iport, set its properties (LUN, compatible,
 * target-port/port-wwn or sata-phy, SAS_PROP, optional GUID, obp-path,
 * pm-capable), and online it.
 *
 * On success *lun_dip references the new node; on failure the node is
 * freed and *lun_dip is set to NULL.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
    char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int			target;
	int			ndi_rtn = NDI_FAILURE;
	uint64_t		be_sas_wwn;
	char			*nodename = NULL;
	char			**compatible = NULL;
	int			ncompatible = 0;
	int			instance = 0;
	mptsas_t		*mpt = DIP2MPT(pdip);
	char			*wwn_str = NULL;
	char			*component = NULL;
	uint8_t			phy = 0xFF;
	uint64_t		sas_wwn;
	uint32_t		devinfo;

	/* Snapshot target fields under the instance mutex. */
	mutex_enter(&mpt->m_mutex);
	target = ptgt->m_devhdl;
	sas_wwn = ptgt->m_sas_wwn;
	devinfo = ptgt->m_deviceinfo;
	phy = ptgt->m_phynum;
	mutex_exit(&mpt->m_mutex);

	/*
	 * generate compatible property with binding-set "mpt"
	 */
	scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
	    &nodename, &compatible, &ncompatible);

	/*
	 * if nodename can't be determined then print a message and skip it
	 */
	if (nodename == NULL) {
		mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
		    "for target %d lun %d", target, lun);
		return (DDI_FAILURE);
	}

	ndi_rtn = ndi_devi_alloc(pdip, nodename,
	    DEVI_SID_NODEID, lun_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {

		if (ndi_prop_update_int(DDI_DEV_T_NONE,
		    *lun_dip, LUN_PROP, lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for target %d lun %d (LUN_PROP)",
			    target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
		    *lun_dip, "compatible", compatible, ncompatible)
		    != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for target %d lun %d (COMPATIBLE)",
			    target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/*
		 * We need the SAS WWN for non-multipath devices, so
		 * we'll use the same property as that multipathing
		 * devices need to present for MPAPI. If we don't have
		 * a WWN (e.g. parallel SCSI), don't create the prop.
		 */
		wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
		(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
		if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
		    *lun_dip, "target-port", wwn_str)
		    != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(target-port)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}
		/* port-wwn is stored as an 8-byte big-endian array. */
		be_sas_wwn = BE_64(sas_wwn);
		if (sas_wwn && ndi_prop_update_byte_array(
		    DDI_DEV_T_NONE, *lun_dip, "port-wwn",
		    (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(port-wwn)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		} else if ((sas_wwn == 0) && (ndi_prop_update_int(
		    DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
		    DDI_PROP_SUCCESS)) {
			/*
			 * Direct attached SATA device without DeviceName
			 */
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(sata-phy)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}
		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to"
			    "create property for SAS target %d lun %d"
			    " (SAS_PROP)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}
		if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
			mptsas_log(mpt, CE_WARN, "mptsas unable "
			    "to create guid property for target %d "
			    "lun %d", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/*
		 * if this is a SAS controller, and the target is a SATA
		 * drive, set the 'pm-capable' property for sd and if on
		 * an OPL platform, also check if this is an ATAPI
		 * device.
		 */
		instance = ddi_get_instance(mpt->m_dip);
		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
			NDBG2(("mptsas%d: creating pm-capable property, "
			    "target %d", instance, target));

			if ((ndi_prop_update_int(DDI_DEV_T_NONE,
			    *lun_dip, "pm-capable", 1)) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas "
				    "failed to create pm-capable "
				    "property, target %d", target);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}

		}

		/* inq_dtype == 0 means a direct-access (disk) device. */
		if (inq->inq_dtype == 0) {
			/*
			 * add 'obp-path' properties for devinfo
			 */
			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			if (sas_wwn) {
				(void) snprintf(component, MAXPATHLEN,
				    "disk@w%s,%x", wwn_str, lun);
			} else {
				(void) snprintf(component, MAXPATHLEN,
				    "disk@p%x,%x", phy, lun);
			}
			if (ddi_pathname_obp_set(*lun_dip, component)
			    != DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
				    "unable to set obp-path for SAS "
				    "object %s", component);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}
		}

phys_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas driver unable to online "
			    "target %d lun %d", target, lun));
			ndi_prop_remove_all(*lun_dip);
			(void) ndi_devi_free(*lun_dip);
			*lun_dip = NULL;
		}
	}

	scsi_hba_nodename_compatible_free(nodename, compatible);

	if (wwn_str != NULL) {
		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
	}
	if (component != NULL) {
		kmem_free(component, MAXPATHLEN);
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
12568 
12569 static int
12570 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
12571 {
12572 	struct smp_device smp;
12573 
12574 	bzero(&smp, sizeof (struct smp_device));
12575 	smp.smp_addr.a_hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SMP);
12576 	bcopy(&wwn, smp.smp_addr.a_wwn, SAS_WWN_BYTE_SIZE);
12577 
12578 	if (sas_hba_probe_smp(&smp) != DDI_PROBE_SUCCESS) {
12579 		return (NDI_FAILURE);
12580 	}
12581 	return (NDI_SUCCESS);
12582 }
12583 
12584 static int
12585 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
12586 {
12587 	mptsas_t	*mpt = DIP2MPT(pdip);
12588 	mptsas_smp_t	*psmp = NULL;
12589 	int		rval;
12590 	int		phymask;
12591 
12592 	/*
12593 	 * Get the physical port associated to the iport
12594 	 * PHYMASK TODO
12595 	 */
12596 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12597 	    "phymask", 0);
12598 	/*
12599 	 * Find the smp node in hash table with specified sas address and
12600 	 * physical port
12601 	 */
12602 	psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
12603 	if (psmp == NULL) {
12604 		return (DDI_FAILURE);
12605 	}
12606 
12607 	rval = mptsas_online_smp(pdip, psmp, smp_dip);
12608 
12609 	return (rval);
12610 }
12611 
12612 static int
12613 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
12614     dev_info_t **smp_dip)
12615 {
12616 	char		wwn_str[MPTSAS_WWN_STRLEN];
12617 	int		ndi_rtn = NDI_FAILURE;
12618 	mptsas_t	*mpt = DIP2MPT(pdip);
12619 
12620 	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
12621 
12622 	/*
12623 	 * Probe smp device, prevent the node of removed device from being
12624 	 * configured succesfully
12625 	 */
12626 	if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
12627 		return (DDI_FAILURE);
12628 	}
12629 
12630 	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
12631 		return (DDI_SUCCESS);
12632 	}
12633 
12634 	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
12635 
12636 	/*
12637 	 * if lun alloc success, set props
12638 	 */
12639 	if (ndi_rtn == NDI_SUCCESS) {
12640 		/*
12641 		 * Set the flavor of the child to be SMP flavored
12642 		 */
12643 		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
12644 
12645 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
12646 		    *smp_dip, SMP_WWN, wwn_str) !=
12647 		    DDI_PROP_SUCCESS) {
12648 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
12649 			    "property for smp device %s (sas_wwn)",
12650 			    wwn_str);
12651 			ndi_rtn = NDI_FAILURE;
12652 			goto smp_create_done;
12653 		}
12654 
12655 		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
12656 		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
12657 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
12658 			    "create property for SMP %s (SMP_PROP) ",
12659 			    wwn_str);
12660 			ndi_rtn = NDI_FAILURE;
12661 			goto smp_create_done;
12662 		}
12663 
12664 smp_create_done:
12665 		/*
12666 		 * If props were setup ok, online the lun
12667 		 */
12668 		if (ndi_rtn == NDI_SUCCESS) {
12669 			/*
12670 			 * Try to online the new node
12671 			 */
12672 			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
12673 		}
12674 
12675 		/*
12676 		 * If success set rtn flag, else unwire alloc'd lun
12677 		 */
12678 		if (ndi_rtn != NDI_SUCCESS) {
12679 			NDBG12(("mptsas unable to online "
12680 			    "SMP target %s", wwn_str));
12681 			ndi_prop_remove_all(*smp_dip);
12682 			(void) ndi_devi_free(*smp_dip);
12683 		}
12684 	}
12685 
12686 	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
12687 }
12688 
12689 /*ARGSUSED*/
12690 static int mptsas_getcap(struct sas_addr *ap, char *cap)
12691 {
12692 	int	ckey = -1;
12693 	int	ret = EINVAL;
12694 
12695 	ckey = sas_hba_lookup_capstr(cap);
12696 	if (ckey == -1)
12697 		return (EINVAL);
12698 
12699 	switch (ckey) {
12700 	case SAS_CAP_SMP_CRC:
12701 		/*
12702 		 * mpt controller support generate CRC for
12703 		 * SMP passthrough frame and handle CRC by
12704 		 * IOC itself.
12705 		 */
12706 		ret = 0;
12707 		break;
12708 	default:
12709 		ret = EINVAL;
12710 		break;
12711 	}
12712 	return (ret);
12713 }
12714 
/*
 * smp transport routine: translate a struct smp_pkt into an MPI2 SMP
 * passthrough request, execute it synchronously via
 * mptsas_do_passthru(), and map IOC/SAS status codes to errno values
 * in pkt_reason.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int mptsas_smp_start(struct smp_pkt *pktp)
{
	uint64_t			wwn;
	Mpi2SmpPassthroughRequest_t	req;
	Mpi2SmpPassthroughReply_t	rep;
	uint32_t			direction = 0;
	mptsas_t			*mpt;
	int				ret;
	uint64_t			tmp64;

	mpt = (mptsas_t *)pktp->pkt_address->a_hba_tran->tran_hba_private;

	bcopy(pktp->pkt_address->a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a 16-bit field; reject oversized requests. */
	if ((pktp->pkt_reqsize & 0xffff0000ul) != 0) {
		pktp->pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * NOTE(review): the "- 4" adjustments here and below presumably
	 * exclude the 4-byte SMP CRC (which the IOC generates/checks
	 * itself, per mptsas_getcap); assumes pkt_reqsize/pkt_rspsize
	 * include the CRC and are >= 4 — confirm against the SMP caller.
	 */
	req.RequestDataLength = LE_16((uint16_t)(pktp->pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (pktp->pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (pktp->pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)pktp->pkt_rsp, offsetof(Mpi2SmpPassthroughRequest_t,
	    SGL), sizeof (rep), pktp->pkt_rspsize - 4, direction,
	    (uint8_t *)pktp->pkt_req, pktp->pkt_reqsize - 4,
	    pktp->pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		pktp->pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map IOC status to an errno-style pkt_reason. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pktp->pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			pktp->pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			pktp->pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			pktp->pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		pktp->pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
12797 
12798 static void
12799 mptsas_idle_pm(void *arg)
12800 {
12801 	mptsas_t	*mpt = arg;
12802 
12803 	(void) pm_idle_component(mpt->m_dip, 0);
12804 	mutex_enter(&mpt->m_mutex);
12805 	mpt->m_pm_timeid = 0;
12806 	mutex_exit(&mpt->m_mutex);
12807 }
12808 
12809 /*
12810  * If we didn't get a match, we need to get sas page0 for each device, and
12811  * untill we get a match. If failed, return NULL
12812  * TODO should be implemented similar to mptsas_wwid_to_ptgt?
12813  */
12814 static mptsas_target_t *
12815 mptsas_phy_to_tgt(dev_info_t *pdip, uint8_t phy)
12816 {
12817 	int		i, j = 0;
12818 	int		rval = 0;
12819 	uint16_t	cur_handle;
12820 	uint32_t	page_address;
12821 	mptsas_target_t	*ptgt = NULL;
12822 	mptsas_t	*mpt = DIP2MPT(pdip);
12823 	int		phymask;
12824 
12825 	/*
12826 	 * Get the physical port associated to the iport
12827 	 */
12828 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12829 	    "phymask", 0);
12830 
12831 	if (phymask == 0)
12832 		return (NULL);
12833 
12834 	/*
12835 	 * PHY named device must be direct attached and attaches to
12836 	 * narrow port, if the iport is not parent of the device which
12837 	 * we are looking for.
12838 	 */
12839 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
12840 		if ((1 << i) & phymask)
12841 			j++;
12842 	}
12843 
12844 	if (j > 1)
12845 		return (NULL);
12846 
12847 	/*
12848 	 * Must be a narrow port and single device attached to the narrow port
12849 	 * So the physical port num of device  which is equal to the iport's
12850 	 * port num is the device what we are looking for.
12851 	 */
12852 
12853 	if (mpt->m_phy_info[phy].phy_mask != phymask)
12854 		return (NULL);
12855 
12856 	mutex_enter(&mpt->m_mutex);
12857 
12858 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
12859 	    MPTSAS_HASH_FIRST);
12860 	while (ptgt != NULL) {
12861 			if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
12862 			mutex_exit(&mpt->m_mutex);
12863 			return (ptgt);
12864 		}
12865 
12866 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
12867 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
12868 	}
12869 
12870 	if (mpt->m_done_traverse_dev) {
12871 		mutex_exit(&mpt->m_mutex);
12872 		return (NULL);
12873 	}
12874 
12875 	/* If didn't get a match, come here */
12876 	cur_handle = mpt->m_dev_handle;
12877 	for (; ; ) {
12878 		ptgt = NULL;
12879 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
12880 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
12881 		rval = mptsas_get_target_device_info(mpt, page_address,
12882 		    &cur_handle, &ptgt);
12883 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
12884 		    (rval == DEV_INFO_FAIL_ALLOC)) {
12885 			break;
12886 		}
12887 		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
12888 		    (rval == DEV_INFO_PHYS_DISK)) {
12889 			continue;
12890 		}
12891 		mpt->m_dev_handle = cur_handle;
12892 
12893 		if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
12894 			break;
12895 		}
12896 	}
12897 
12898 	mutex_exit(&mpt->m_mutex);
12899 	return (ptgt);
12900 }
12901 
12902 /*
12903  * The ptgt->m_sas_wwn contains the wwid for each disk.
12904  * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
12905  * If we didn't get a match, we need to get sas page0 for each device, and
12906  * untill we get a match
12907  * If failed, return NULL
12908  */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int		rval = 0;
	uint16_t	cur_handle;
	uint32_t	page_address;
	mptsas_target_t	*tmp_tgt = NULL;

	/* Fast path: target already known in the hash table. */
	mutex_enter(&mpt->m_mutex);
	tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
	    &mpt->m_active->m_tgttbl, wwid, phymask);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			/* Retry the lookup now that RAID info is fresh. */
			tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
			    &mpt->m_active->m_tgttbl, wwid, phymask);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* All devices already discovered and still no match: give up. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		tmp_tgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			tmp_tgt = NULL;
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember where discovery left off for the next caller. */
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
		    (tmp_tgt->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
12969 }
12970 
12971 static mptsas_smp_t *
12972 mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
12973 {
12974 	int		rval = 0;
12975 	uint16_t	cur_handle;
12976 	uint32_t	page_address;
12977 	mptsas_smp_t	smp_node, *psmp = NULL;
12978 
12979 	mutex_enter(&mpt->m_mutex);
12980 	psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
12981 	    wwid, phymask);
12982 	if (psmp != NULL) {
12983 		mutex_exit(&mpt->m_mutex);
12984 		return (psmp);
12985 	}
12986 
12987 	if (mpt->m_done_traverse_smp) {
12988 		mutex_exit(&mpt->m_mutex);
12989 		return (NULL);
12990 	}
12991 
12992 	/* If didn't get a match, come here */
12993 	cur_handle = mpt->m_smp_devhdl;
12994 	for (; ; ) {
12995 		psmp = NULL;
12996 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
12997 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
12998 		rval = mptsas_get_sas_expander_page0(mpt, page_address,
12999 		    &smp_node);
13000 		if (rval != DDI_SUCCESS) {
13001 			break;
13002 		}
13003 		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
13004 		psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
13005 		ASSERT(psmp);
13006 		if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
13007 		    (psmp->m_phymask == phymask)) {
13008 			break;
13009 		}
13010 	}
13011 
13012 	mutex_exit(&mpt->m_mutex);
13013 	return (psmp);
13014 }
13015 
13016 /* helper functions using hash */
13017 
13018 /*
13019  * Can't have duplicate entries for same devhdl,
13020  * if there are invalid entries, the devhdl should be set to 0xffff
13021  */
13022 static void *
13023 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
13024 {
13025 	mptsas_hash_data_t *data;
13026 
13027 	data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
13028 	while (data != NULL) {
13029 		if (data->devhdl == devhdl) {
13030 			break;
13031 		}
13032 		data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
13033 	}
13034 	return (data);
13035 }
13036 
13037 mptsas_target_t *
13038 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
13039     uint32_t devinfo, uint8_t phymask, uint8_t phynum)
13040 {
13041 	mptsas_target_t *tmp_tgt = NULL;
13042 
13043 	tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
13044 	if (tmp_tgt != NULL) {
13045 		NDBG20(("Hash item already exist"));
13046 		tmp_tgt->m_deviceinfo = devinfo;
13047 		tmp_tgt->m_devhdl = devhdl;
13048 		return (tmp_tgt);
13049 	}
13050 	tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
13051 	if (tmp_tgt == NULL) {
13052 		cmn_err(CE_WARN, "Fatal, allocated tgt failed");
13053 		return (NULL);
13054 	}
13055 	tmp_tgt->m_devhdl = devhdl;
13056 	tmp_tgt->m_sas_wwn = wwid;
13057 	tmp_tgt->m_deviceinfo = devinfo;
13058 	tmp_tgt->m_phymask = phymask;
13059 	tmp_tgt->m_phynum = phynum;
13060 	/* Initialized the tgt structure */
13061 	tmp_tgt->m_qfull_retries = QFULL_RETRIES;
13062 	tmp_tgt->m_qfull_retry_interval =
13063 	    drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
13064 	tmp_tgt->m_t_throttle = MAX_THROTTLE;
13065 
13066 	mptsas_hash_add(hashtab, tmp_tgt);
13067 
13068 	return (tmp_tgt);
13069 }
13070 
13071 static void
13072 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid, uint8_t phymask)
13073 {
13074 	mptsas_target_t *tmp_tgt;
13075 	tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
13076 	if (tmp_tgt == NULL) {
13077 		cmn_err(CE_WARN, "Tgt not found, nothing to free");
13078 	} else {
13079 		kmem_free(tmp_tgt, sizeof (struct mptsas_target));
13080 	}
13081 }
13082 
13083 /*
13084  * Return the entry in the hash table
13085  */
13086 static mptsas_smp_t *
13087 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
13088 {
13089 	uint64_t key1 = data->m_sasaddr;
13090 	uint8_t key2 = data->m_phymask;
13091 	mptsas_smp_t *ret_data;
13092 
13093 	ret_data = mptsas_hash_search(hashtab, key1, key2);
13094 	if (ret_data != NULL) {
13095 		bcopy(data, ret_data, sizeof (mptsas_smp_t));
13096 		return (ret_data);
13097 	}
13098 
13099 	ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
13100 	bcopy(data, ret_data, sizeof (mptsas_smp_t));
13101 	mptsas_hash_add(hashtab, ret_data);
13102 	return (ret_data);
13103 }
13104 
13105 static void
13106 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid, uint8_t phymask)
13107 {
13108 	mptsas_smp_t *tmp_smp;
13109 	tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
13110 	if (tmp_smp == NULL) {
13111 		cmn_err(CE_WARN, "Smp element not found, nothing to free");
13112 	} else {
13113 		kmem_free(tmp_smp, sizeof (struct mptsas_smp));
13114 	}
13115 }
13116 
13117 /*
13118  * Hash operation functions
13119  * key1 is the sas_wwn, key2 is the phymask
13120  */
13121 static void
13122 mptsas_hash_init(mptsas_hash_table_t *hashtab)
13123 {
13124 	if (hashtab == NULL) {
13125 		return;
13126 	}
13127 	bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
13128 	    MPTSAS_HASH_ARRAY_SIZE);
13129 	hashtab->cur = NULL;
13130 	hashtab->line = 0;
13131 }
13132 
13133 static void
13134 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
13135 {
13136 	uint16_t line = 0;
13137 	mptsas_hash_node_t *cur = NULL, *last = NULL;
13138 
13139 	if (hashtab == NULL) {
13140 		return;
13141 	}
13142 	for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
13143 		cur = hashtab->head[line];
13144 		while (cur != NULL) {
13145 			last = cur;
13146 			cur = cur->next;
13147 			kmem_free(last->data, datalen);
13148 			kmem_free(last, sizeof (mptsas_hash_node_t));
13149 		}
13150 	}
13151 }
13152 
13153 /*
13154  * You must guarantee the element doesn't exist in the hash table
13155  * before you call mptsas_hash_add()
13156  */
13157 static void
13158 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
13159 {
13160 	uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
13161 	uint8_t	key2 = ((mptsas_hash_data_t *)data)->key2;
13162 	mptsas_hash_node_t **head = NULL;
13163 	mptsas_hash_node_t *node = NULL;
13164 
13165 	if (hashtab == NULL) {
13166 		return;
13167 	}
13168 	ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
13169 	node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
13170 	node->data = data;
13171 
13172 	head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
13173 	if (*head == NULL) {
13174 		*head = node;
13175 	} else {
13176 		node->next = *head;
13177 		*head = node;
13178 	}
13179 }
13180 
13181 static void *
13182 mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1, uint8_t key2)
13183 {
13184 	mptsas_hash_node_t **head = NULL;
13185 	mptsas_hash_node_t *last = NULL, *cur = NULL;
13186 	mptsas_hash_data_t *data;
13187 	if (hashtab == NULL) {
13188 		return (NULL);
13189 	}
13190 	head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
13191 	cur = *head;
13192 	while (cur != NULL) {
13193 		data = cur->data;
13194 		if ((data->key1 == key1) && (data->key2 == key2)) {
13195 			if (last == NULL) {
13196 				(*head) = cur->next;
13197 			} else {
13198 				last->next = cur->next;
13199 			}
13200 			kmem_free(cur, sizeof (mptsas_hash_node_t));
13201 			return (data);
13202 		} else {
13203 			last = cur;
13204 			cur = cur->next;
13205 		}
13206 	}
13207 	return (NULL);
13208 }
13209 
13210 static void *
13211 mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1, uint8_t key2)
13212 {
13213 	mptsas_hash_node_t *cur = NULL;
13214 	mptsas_hash_data_t *data;
13215 	if (hashtab == NULL) {
13216 		return (NULL);
13217 	}
13218 	cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
13219 	while (cur != NULL) {
13220 		data = cur->data;
13221 		if ((data->key1 == key1) && (data->key2 == key2)) {
13222 			return (data);
13223 		} else {
13224 			cur = cur->next;
13225 		}
13226 	}
13227 	return (NULL);
13228 }
13229 
/*
 * Stateful iterator over every payload in the hash table.  Call with
 * pos == MPTSAS_HASH_FIRST to rewind and fetch the first payload, then
 * repeatedly with MPTSAS_HASH_NEXT; returns NULL once the table is
 * exhausted (or if NEXT is requested with no traversal in progress).
 *
 * The cursor lives in the table itself (hashtab->cur / hashtab->line),
 * so only one traversal per table can be active at a time, and a
 * concurrent add/rem may invalidate the cursor.
 */
static void *
mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
{
	mptsas_hash_node_t *this = NULL;

	if (hashtab == NULL) {
		return (NULL);
	}

	if (pos == MPTSAS_HASH_FIRST) {
		/* Rewind: start from the head of bucket 0. */
		hashtab->line = 0;
		hashtab->cur = NULL;
		this = hashtab->head[0];
	} else {
		if (hashtab->cur == NULL) {
			/* No traversal in progress, or already finished. */
			return (NULL);
		} else {
			this = hashtab->cur->next;
		}
	}

	/* Skip empty buckets until the next non-empty chain is found. */
	while (this == NULL) {
		hashtab->line++;
		if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
			/* the traverse reaches the end */
			hashtab->cur = NULL;
			return (NULL);
		} else {
			this = hashtab->head[hashtab->line];
		}
	}
	hashtab->cur = this;
	return (this->data);
}
13264